diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 000000000..6dc577ef1 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,71 @@ +name: Build + +on: push + +jobs: + + x86: + name: x86 + runs-on: ubuntu-20.04 + steps: + - name: Check out code + uses: actions/checkout@v2 + with: + fetch-depth: 0 + - name: Install packages + run: | + sudo dpkg --add-architecture i386 + sudo apt update + sudo apt install -y -o Acquire::Retries=50 \ + mtools syslinux isolinux \ + libc6-dev-i386 libc6-dbg:i386 valgrind + - name: Build (BIOS) + run: | + make -j 4 -C src + - name: Build (Everything) + run: | + make -j 4 -C src everything + - name: Test + run: | + valgrind ./src/bin-i386-linux/tests.linux + valgrind ./src/bin-x86_64-linux/tests.linux + + arm32: + name: ARM32 + runs-on: ubuntu-20.04 + steps: + - name: Check out code + uses: actions/checkout@v2 + with: + fetch-depth: 0 + - name: Install packages + run: | + sudo apt update + sudo apt install -y -o Acquire::Retries=50 \ + mtools syslinux isolinux gcc-arm-none-eabi + - name: Build + run: | + make -j 4 -C src CROSS=arm-none-eabi- \ + bin-arm32-efi/intel.efi \ + bin-arm32-efi/intel.usb \ + bin-arm32-efi/intel.iso + + arm64: + name: ARM64 + runs-on: ubuntu-20.04 + steps: + - name: Check out code + uses: actions/checkout@v2 + with: + fetch-depth: 0 + - name: Install packages + run: | + sudo apt update + sudo apt install -y -o Acquire::Retries=50 \ + mtools syslinux isolinux gcc-aarch64-linux-gnu + - name: Build + run: | + make -j 4 -C src CROSS=aarch64-linux-gnu- \ + bin-arm64-efi/ipxe.efi \ + bin-arm64-efi/ipxe.usb \ + bin-arm64-efi/ipxe.iso diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml new file mode 100644 index 000000000..20634be42 --- /dev/null +++ b/.github/workflows/coverity.yml @@ -0,0 +1,37 @@ +name: Coverity Scan + +on: + push: + branches: + - coverity_scan + +jobs: + submit: + name: Submit + runs-on: ubuntu-20.04 + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Download Coverity Scan + run: | + curl --form token=${{ secrets.COVERITY_SCAN_TOKEN }} \ + --form project=${{ github.repository }} \ + --output coverity.tar.gz \ + https://scan.coverity.com/download/cxx/linux64 + mkdir -p /opt/coverity + sudo tar xvzf coverity.tar.gz --strip 1 --directory /opt/coverity + - name: Build via Coverity Scan + run: | + make -C src bin/deps + /opt/coverity/bin/cov-build --dir cov-int make -C src bin/blib.a + - name: Create submission + run : | + tar cvzf cov-int.tar.gz cov-int + - name: Submit to Coverity Scan + run: | + curl --form token=${{ secrets.COVERITY_SCAN_TOKEN }} \ + --form email=${{ secrets.COVERITY_SCAN_EMAIL }} \ + --form file=@cov-int.tar.gz \ + --form version=${{ github.sha }} \ + --form description=${{ github.ref }} \ + https://scan.coverity.com/builds?project=${{ github.repository }} diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 43849cc5b..000000000 --- a/.travis.yml +++ /dev/null @@ -1,57 +0,0 @@ -dist: trusty - -sudo: false - -git: - depth: false - -language: c - -cache: ccache - -compiler: - - gcc - -addons: - apt: - packages: - - binutils-dev - - liblzma-dev - - syslinux - - genisoimage - coverity_scan: - project: - name: "ipxe/ipxe" - version: $TRAVIS_COMMIT - build_command_prepend: "make -C src bin/deps" - build_command: "make -C src bin/blib.a" - branch_pattern: coverity_scan - -env: - global: - - MAKEFLAGS="-j 4" - -script: - - make -C src bin/blib.a - - make -C src bin/ipxe.pxe - - make -C src bin/ipxe.usb - 
- make -C src bin/ipxe.iso - - make -C src bin/8086100e.mrom - - make -C src bin-x86_64-pcbios/blib.a - - make -C src bin-x86_64-pcbios/ipxe.pxe - - make -C src bin-x86_64-pcbios/ipxe.usb - - make -C src bin-x86_64-pcbios/ipxe.iso - - make -C src bin-x86_64-pcbios/8086100e.mrom - - make -C src bin-x86_64-efi/blib.a - - make -C src bin-x86_64-efi/ipxe.efi - - make -C src bin-x86_64-efi/intel.efidrv - - make -C src bin-x86_64-efi/intel.efirom - - make -C src bin-i386-efi/blib.a - - make -C src bin-i386-efi/ipxe.efi - - make -C src bin-i386-efi/intel.efidrv - - make -C src bin-i386-efi/intel.efirom - - make -C src bin-x86_64-linux/blib.a - - make -C src bin-x86_64-linux/tap.linux - - make -C src bin-x86_64-linux/af_packet.linux - - make -C src bin-x86_64-linux/tests.linux - - ./src/bin-x86_64-linux/tests.linux diff --git a/contrib/cloud/aws-import b/contrib/cloud/aws-import new file mode 100755 index 000000000..eef4302d5 --- /dev/null +++ b/contrib/cloud/aws-import @@ -0,0 +1,139 @@ +#!/usr/bin/env python3 + +import argparse +from base64 import b64encode +from concurrent.futures import ThreadPoolExecutor, as_completed +from datetime import date +from hashlib import sha256 +from itertools import count +import subprocess + +import boto3 + +BLOCKSIZE = 512 * 1024 + + +def detect_architecture(image): + """Detect CPU architecture""" + mdir = subprocess.run(['mdir', '-b', '-i', image, '::/EFI/BOOT'], + capture_output=True) + if any(b'BOOTAA64.EFI' in x for x in mdir.stdout.splitlines()): + return 'arm64' + return 'x86_64' + + +def create_snapshot(region, description, image): + """Create an EBS snapshot""" + client = boto3.client('ebs', region_name=region) + snapshot = client.start_snapshot(VolumeSize=1, + Description=description) + snapshot_id = snapshot['SnapshotId'] + with open(image, 'rb') as fh: + for block in count(): + data = fh.read(BLOCKSIZE) + if not data: + break + data = data.ljust(BLOCKSIZE, b'\0') + checksum = b64encode(sha256(data).digest()).decode() + client.put_snapshot_block(SnapshotId=snapshot_id, + BlockIndex=block, + BlockData=data, + DataLength=BLOCKSIZE, + Checksum=checksum, + ChecksumAlgorithm='SHA256') + client.complete_snapshot(SnapshotId=snapshot_id, + ChangedBlocksCount=block) + return snapshot_id + + +def import_image(region, name, architecture, image, public): + """Import an AMI image""" + client = boto3.client('ec2', region_name=region) + resource = boto3.resource('ec2', region_name=region) + description = '%s (%s)' % (name, architecture) + snapshot_id = create_snapshot(region=region, description=description, + image=image) + client.get_waiter('snapshot_completed').wait(SnapshotIds=[snapshot_id]) + image = client.register_image(Architecture=architecture, + BlockDeviceMappings=[{ + 'DeviceName': '/dev/sda1', + 'Ebs': { + 'SnapshotId': snapshot_id, + 'VolumeType': 'standard', + }, + }], + EnaSupport=True, + Name=description, + RootDeviceName='/dev/sda1', + SriovNetSupport='simple', + VirtualizationType='hvm') + image_id = image['ImageId'] + client.get_waiter('image_available').wait(ImageIds=[image_id]) + if public: + resource.Image(image_id).modify_attribute(Attribute='launchPermission', + OperationType='add', + UserGroups=['all']) + return image_id + + +def launch_link(region, image_id): + """Construct a web console launch link""" + return ("https://console.aws.amazon.com/ec2/v2/home?" 
+ "region=%s#LaunchInstanceWizard:ami=%s" % (region, image_id)) + + +# Parse command-line arguments +parser = argparse.ArgumentParser(description="Import AWS EC2 image (AMI)") +parser.add_argument('--name', '-n', + help="Image name") +parser.add_argument('--public', '-p', action='store_true', + help="Make image public") +parser.add_argument('--region', '-r', action='append', + help="AWS region(s)") +parser.add_argument('--wiki', '-w', metavar='FILE', + help="Generate Dokuwiki table") +parser.add_argument('image', nargs='+', help="iPXE disk image") +args = parser.parse_args() + +# Detect CPU architectures +architectures = {image: detect_architecture(image) for image in args.image} + +# Use default name if none specified +if not args.name: + args.name = 'iPXE (%s)' % date.today().strftime('%Y-%m-%d') + +# Use all regions if none specified +if not args.region: + args.region = sorted(x['RegionName'] for x in + boto3.client('ec2').describe_regions()['Regions']) + +# Use one thread per import to maximise parallelism +imports = [(region, image) for region in args.region for image in args.image] +with ThreadPoolExecutor(max_workers=len(imports)) as executor: + futures = {executor.submit(import_image, + region=region, + name=args.name, + architecture=architectures[image], + image=image, + public=args.public): (region, image) + for region, image in imports} + results = {futures[future]: future.result() + for future in as_completed(futures)} + +# Construct Dokuwiki table +wikitab = ["^ AWS region ^ CPU architecture ^ AMI ID ^\n"] + list( + "| ''%s'' | ''%s'' | ''[[%s|%s]]'' |\n" % ( + region, + architectures[image], + launch_link(region, results[(region, image)]), + results[(region, image)], + ) for region, image in imports) +if args.wiki: + with open(args.wiki, 'wt') as fh: + fh.writelines(wikitab) + +# Show created images +for region, image in imports: + print("%s %s %s %s" % ( + region, image, architectures[image], results[(region, image)] + )) diff --git a/contrib/vm/bochsrc.txt b/contrib/vm/bochsrc.txt index d0f12504b..feda98595 100644 --- a/contrib/vm/bochsrc.txt +++ b/contrib/vm/bochsrc.txt @@ -7,10 +7,15 @@ # directly with this option and some of them install a config option that is # only available when the plugin device is loaded. The value "1" means to load # the plugin and "0" will unload it (if loaded before). -# These plugins are currently supported: 'biosdev', 'e1000', 'es1370', -# 'extfpuirq', 'gameport', 'iodebug', 'ne2k', 'parallel', 'pcidev', 'pcipnic', -# 'sb16', 'serial', 'speaker', 'unmapped', 'usb_ohci', 'usb_uhci' and 'usb_xhci'. +# +# These plugins will be loaded by default (if present): 'biosdev', 'extfpuirq', +# 'gameport', 'iodebug','parallel', 'serial', 'speaker' and 'unmapped'. +# +# These plugins are also supported, but they are usually loaded directly with +# their bochsrc option: 'e1000', 'es1370', 'ne2k', 'pcidev', 'pcipnic', 'sb16', +# 'usb_ehci', 'usb_ohci', 'usb_uhci', 'usb_xhci' and 'voodoo'. 
#======================================================================= +#plugin_ctrl: unmapped=0, e1000=1 # unload 'unmapped' and load 'e1000' plugin_ctrl: unmapped=1, biosdev=1, speaker=1, e1000=1, parallel=1, serial=1 #======================================================================= @@ -50,10 +55,11 @@ plugin_ctrl: unmapped=1, biosdev=1, speaker=1, e1000=1, parallel=1, serial=1 # carbon use Carbon library (for MacOS X) # macintosh use MacOS pre-10 # amigaos use native AmigaOS libraries -# sdl use SDL library, cross platform -# svga use SVGALIB library for Linux, allows graphics without X11 +# sdl use SDL 1.2.x library, cross platform +# sdl2 use SDL 2.x library, cross platform # term text only, uses curses/ncurses library, cross platform # rfb provides an interface to AT&T's VNC viewer, cross platform +# vncsrv use LibVNCServer for extended RFB(VNC) support # wx use wxWidgets library, cross platform # nogui no display at all # @@ -64,52 +70,50 @@ plugin_ctrl: unmapped=1, biosdev=1, speaker=1, e1000=1, parallel=1, serial=1 # Some display libraries now support specific options to control their # behaviour. These options are supported by more than one display library: # -# "gui_debug" - use GTK debugger gui (sdl, x) / Win32 debugger gui (win32) -# "hideIPS" - disable IPS output in status bar (sdl, wx, x) -# "nokeyrepeat" - turn off host keyboard repeat (sdl, win32, x) +# "cmdmode" - call a headerbar button handler after pressing F7 (sdl, sdl2, +# win32, x) +# "fullscreen" - startup in fullscreen mode (sdl, sdl2) +# "gui_debug" - use GTK debugger gui (sdl, sdl2, x) / Win32 debugger gui (sdl, +# sdl2, win32) +# "hideIPS" - disable IPS output in status bar (rfb, sdl, sdl2, term, vncsrv, +# win32, wx, x) +# "nokeyrepeat" - turn off host keyboard repeat (sdl, sdl2, win32, x) +# "timeout" - time (in seconds) to wait for client (rfb, vncsrv) # # See the examples below for other currently supported options. +# Setting up options without specifying display library is also supported. #======================================================================= #display_library: amigaos #display_library: carbon #display_library: macintosh #display_library: nogui -#display_library: rfb, options="timeout=60" # time to wait for client -#display_library: sdl, options="fullscreen" # startup in fullscreen mode +#display_library: rfb +#display_library: sdl +#display_library: sdl2 #display_library: term -#display_library: win32 +#display_library: vncsrv +# "traphotkeys" - system hotkeys not handled by host OS, but sent to guest +# (win32 in mouse capture and fullscreen mode: alt-tab, win, +# alt-space, alt-esc, ctrl-esc) +# "autoscale" - scale small simulation window by factor 2, 4 or 8 depending +# on desktop window size +#display_library: win32, options="traphotkeys, autoscale" #display_library: wx #display_library: x -#======================================================================= -# ROMIMAGE: -# The ROM BIOS controls what the PC does when it first powers on. -# Normally, you can use a precompiled BIOS in the source or binary -# distribution called BIOS-bochs-latest. The ROM BIOS is usually loaded -# starting at address 0xf0000, and it is exactly 64k long. Another option -# is 128k BIOS which is loaded at address 0xe0000. -# You can also use the environment variable $BXSHARE to specify the -# location of the BIOS. -# The usage of external large BIOS images (up to 512k) at memory top is -# now supported, but we still recommend to use the BIOS distributed with -# Bochs. 
The start address optional, since it can be calculated from image size. -#======================================================================= -#romimage: file=$BXSHARE/BIOS-bochs-latest -#romimage: file=bios/seabios-1.6.3.bin -#romimage: file=mybios.bin, address=0xfff80000 # 512k at memory top -romimage: file=bochs/bios/BIOS-bochs-latest - #======================================================================= # CPU: # This defines cpu-related parameters inside Bochs: # # MODEL: # Selects CPU configuration to emulate from pre-defined list of all -# supported configurations. When this option is used, the CPUID option -# has no effect anymore. +# supported configurations. When this option is used and the value +# is different from 'bx_generic', the parameters of the CPUID option +# have no effect anymore. # # CPU configurations that can be selected: # ----------------------------------------------------------------- +# pentium Intel Pentium (P54C) # pentium_mmx Intel Pentium MMX # amd_k6_2_chomper AMD-K6(tm) 3D processor (Chomper) # p2_klamath Intel Pentium II (Klamath) @@ -117,23 +121,28 @@ romimage: file=bochs/bios/BIOS-bochs-latest # p4_willamette Intel(R) Pentium(R) 4 (Willamette) # core_duo_t2400_yonah Intel(R) Core(TM) Duo CPU T2400 (Yonah) # atom_n270 Intel(R) Atom(TM) CPU N270 +# p4_prescott_celeron_336 Intel(R) Celeron(R) 336 (Prescott) # athlon64_clawhammer AMD Athlon(tm) 64 Processor 2800+ (Clawhammer) # athlon64_venice AMD Athlon(tm) 64 Processor 3000+ (Venice) # turion64_tyler AMD Turion(tm) 64 X2 Mobile TL-60 (Tyler) # phenom_8650_toliman AMD Phenom X3 8650 (Toliman) -# p4_prescott_celeron_336 Intel(R) Celeron(R) 336 (Prescott) # core2_penryn_t9600 Intel Mobile Core 2 Duo T9600 (Penryn) # corei5_lynnfield_750 Intel(R) Core(TM) i5 750 (Lynnfield) # corei5_arrandale_m520 Intel(R) Core(TM) i5 M 520 (Arrandale) # corei7_sandy_bridge_2600k Intel(R) Core(TM) i7-2600K (Sandy Bridge) +# zambezi AMD FX(tm)-4100 Quad-Core Processor (Zambezi) +# trinity_apu AMD A8-5600K APU (Trinity) +# ryzen AMD Ryzen 7 1700 # corei7_ivy_bridge_3770k Intel(R) Core(TM) i7-3770K CPU (Ivy Bridge) +# corei7_haswell_4770 Intel(R) Core(TM) i7-4770 CPU (Haswell) +# broadwell_ult Intel(R) Processor 5Y70 CPU (Broadwell) # # COUNT: -# Set the number of processors:cores per processor:threads per core -# when Bochs is compiled for SMP emulation. -# Bochs currently supports up to 8 threads running simultaniosly. -# If Bochs is compiled without SMP support, it won't accept values -# different from 1. +# Set the number of processors:cores per processor:threads per core when +# Bochs is compiled for SMP emulation. Bochs currently supports up to +# 14 threads (legacy APIC) or 254 threads (xAPIC or higher) running +# simultaniosly. If Bochs is compiled without SMP support, it won't accept +# values different from 1. # # QUANTUM: # Maximum amount of instructions allowed to execute by processor before @@ -156,7 +165,7 @@ romimage: file=bochs/bios/BIOS-bochs-latest # IGNORE_BAD_MSRS: # Ignore MSR references that Bochs does not understand; print a warning # message instead of generating #GP exception. This option is enabled -# by default but will not be avaiable if configurable MSRs are enabled. +# by default but will not be available if configurable MSRs are enabled. # # MWAIT_IS_NOP: # When this option is enabled MWAIT will not put the CPU into a sleep state. @@ -205,23 +214,38 @@ cpu: cpuid_limit_winnt=0 # Select SYSENTER/SYSEXIT instruction set support. 
# This option exists only if Bochs compiled with BX_CPU_LEVEL >= 6. # -# SSE: -# Select SSE instruction set support. -# Any of NONE/SSE/SSE2/SSE3/SSSE3/SSE4_1/SSE4_2 could be selected. +# SIMD: +# Select SIMD instructions support. +# Any of NONE/SSE/SSE2/SSE3/SSSE3/SSE4_1/SSE4_2/AVX/AVX2/AVX512 +# could be selected. +# # This option exists only if Bochs compiled with BX_CPU_LEVEL >= 6. +# The AVX choises exists only if Bochs compiled with --enable-avx option. # # SSE4A: # Select AMD SSE4A instructions support. # This option exists only if Bochs compiled with BX_CPU_LEVEL >= 6. # +# MISALIGNED_SSE: +# Select AMD Misaligned SSE mode support. +# This option exists only if Bochs compiled with BX_CPU_LEVEL >= 6. +# # AES: # Select AES instruction set support. # This option exists only if Bochs compiled with BX_CPU_LEVEL >= 6. # +# SHA: +# Select SHA instruction set support. +# This option exists only if Bochs compiled with BX_CPU_LEVEL >= 6. +# # MOVBE: # Select MOVBE Intel(R) Atom instruction support. # This option exists only if Bochs compiled with BX_CPU_LEVEL >= 6. # +# ADX: +# Select ADCX/ADOX instructions support. +# This option exists only if Bochs compiled with BX_CPU_LEVEL >= 6. +# # XSAVE: # Select XSAVE extensions support. # This option exists only if Bochs compiled with BX_CPU_LEVEL >= 6. @@ -230,10 +254,6 @@ cpu: cpuid_limit_winnt=0 # Select XSAVEOPT instruction support. # This option exists only if Bochs compiled with BX_CPU_LEVEL >= 6. # -# AVX: -# Select AVX/AVX2 instruction set support. -# This option exists only if Bochs compiled with --enable-avx option. -# # AVX_F16C: # Select AVX float16 convert instructions support. # This option exists only if Bochs compiled with --enable-avx option. @@ -278,6 +298,10 @@ cpu: cpuid_limit_winnt=0 # Enable Supervisor Mode Execution Protection (SMEP) support. # This option exists only if Bochs compiled with BX_CPU_LEVEL >= 6. # +# SMAP: +# Enable Supervisor Mode Access Prevention (SMAP) support. +# This option exists only if Bochs compiled with BX_CPU_LEVEL >= 6. +# # MWAIT: # Select MONITOR/MWAIT instructions support. # This option exists only if Bochs compiled with --enable-monitor-mwait. @@ -286,6 +310,10 @@ cpu: cpuid_limit_winnt=0 # Select VMX extensions emulation support. # This option exists only if Bochs compiled with --enable-vmx option. # +# SVM: +# Select AMD SVM (Secure Virtual Machine) extensions emulation support. +# This option exists only if Bochs compiled with --enable-svm option. +# # VENDOR_STRING: # Set the CPUID vendor string returned by CPUID(0x0). This should be a # twelve-character ASCII string. @@ -294,6 +322,12 @@ cpu: cpuid_limit_winnt=0 # Set the CPUID vendor string returned by CPUID(0x80000002 .. 0x80000004). # This should be at most a forty-eight-character ASCII string. # +# LEVEL: +# Set emulated CPU level information returned by CPUID. Default value is +# determined by configure option --enable-cpu-level. Currently supported +# values are 5 (for Pentium and similar processors) and 6 (for P6 and +# later processors). +# # FAMILY: # Set model information returned by CPUID. Default family value determined # by configure option --enable-cpu-level. @@ -304,7 +338,7 @@ cpu: cpuid_limit_winnt=0 # STEPPING: # Set stepping information returned by CPUID. Default stepping value is 3. 
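Tying several of the options above together, a sketch of a cpuid line that uses the renamed SIMD parameter plus a few of the newer feature toggles (it assumes Bochs was configured with BX_CPU_LEVEL >= 6 and --enable-avx; the family/model/stepping values are purely illustrative):

    # cpuid: x86_64=1, simd=avx2, avx_f16c=1, sha=1, adx=1, smep=1, smap=1
    # cpuid: level=6, family=6, model=0x3a, stepping=9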
#======================================================================= -#cpuid: x86_64=1, mmx=1, sep=1, sse=sse4_2, apic=xapic, aes=1, movbe=1, xsave=1 +#cpuid: x86_64=1, mmx=1, sep=1, simd=sse4_2, apic=xapic, aes=1, movbe=1, xsave=1 #cpuid: family=6, model=0x1a, stepping=5 #======================================================================= @@ -326,6 +360,37 @@ cpu: cpuid_limit_winnt=0 #======================================================================= memory: guest=512, host=256 +#======================================================================= +# ROMIMAGE: +# The ROM BIOS controls what the PC does when it first powers on. +# Normally, you can use a precompiled BIOS in the source or binary +# distribution called BIOS-bochs-latest. The default ROM BIOS is usually loaded +# starting at address 0xfffe0000, and it is exactly 128k long. The legacy +# version of the Bochs BIOS is usually loaded starting at address 0xffff0000, +# and it is exactly 64k long. +# You can use the environment variable $BXSHARE to specify the location +# of the BIOS. +# The usage of external large BIOS images (up to 512k) at memory top is +# now supported, but we still recommend to use the BIOS distributed with Bochs. +# The start address is optional, since it can be calculated from image size. +# The Bochs BIOS currently supports only the option "fastboot" to skip the +# boot menu delay. +#======================================================================= +#romimage: file=$BXSHARE/BIOS-bochs-latest, options=fastboot +#romimage: file=$BXSHARE/bios.bin-1.13.0 # http://www.seabios.org/SeaBIOS +#romimage: file=mybios.bin, address=0xfff80000 # 512k at memory top +romimage: file=bochs/bios/BIOS-bochs-latest + +#======================================================================= +# VGAROMIMAGE +# You now need to load a VGA ROM BIOS into C0000. +#======================================================================= +#vgaromimage: file=$BXSHARE/VGABIOS-lgpl-latest +#vgaromimage: file=bios/VGABIOS-lgpl-latest-cirrus +#vgaromimage: file=$BXSHARE/vgabios-cirrus.bin-1.13.0 # http://www.seabios.org/SeaVGABIOS +#vgaromimage: file=bios/VGABIOS-elpin-2.40 +vgaromimage: file=bochs/bios/VGABIOS-lgpl-latest + #======================================================================= # OPTROMIMAGE[1-4]: # You may now load up to 4 optional ROM images. Be sure to use a @@ -348,15 +413,6 @@ optromimage1: file=../../src/bin/intel.rom, address=0xcb000 #optramimage3: file=/path/file3.img, address=0x0030000 #optramimage4: file=/path/file4.img, address=0x0040000 -#======================================================================= -# VGAROMIMAGE -# You now need to load a VGA ROM BIOS into C0000. -#======================================================================= -#vgaromimage: file=bios/VGABIOS-elpin-2.40 -#vgaromimage: file=$BXSHARE/VGABIOS-lgpl-latest -#vgaromimage: file=bios/VGABIOS-lgpl-latest-cirrus -vgaromimage: file=bochs/bios/VGABIOS-lgpl-latest - #======================================================================= # VGA: # This defines parameters related to the VGA display @@ -364,20 +420,227 @@ vgaromimage: file=bochs/bios/VGABIOS-lgpl-latest # EXTENSION # Here you can specify the display extension to be used. With the value # 'none' you can use standard VGA with no extension. Other supported -# values are 'vbe' for Bochs VBE and 'cirrus' for Cirrus SVGA support. 
+# values are 'vbe' for Bochs VBE, 'cirrus' for Cirrus SVGA support and +# 'voodoo' for Voodoo Graphics support (see 'voodoo' option). # # UPDATE_FREQ -# The VGA update frequency is based on the emulated clock and the default -# value is 5. Keep in mind that you must tweak the 'cpu: ips=N' directive -# to be as close to the number of emulated instructions-per-second your -# workstation can do, for this to be accurate. If the realtime sync is -# enabled with the 'clock' option, the value is based on the real time. -# This parameter can be changed at runtime. +# This parameter specifies the number of display updates per second. +# The VGA update timer by default uses the realtime engine with a value +# of 5. This parameter can be changed at runtime. +# +# REALTIME +# If set to 1 (default), the VGA timer is based on realtime, otherwise it +# is driven by the cpu and depends on the ips setting. If the host is slow +# (low ips, update_freq) and the guest uses HLT appropriately, setting this +# to 0 and "clock: sync=none" may improve the responsiveness of the guest +# GUI when the guest is otherwise idle. +# +# DDC +# This parameter defines the behaviour of the DDC emulation that returns +# the monitor EDID data. By default the 'builtin' values for 'Bochs Screen' +# are used. Other choices are 'disabled' (no DDC emulation) and 'file' +# (read monitor EDID from file / path name separated with a colon). +# Examples: +# vga: extension=cirrus, update_freq=10, ddc=builtin +#======================================================================= +#vga: extension=vbe, update_freq=5, realtime=1, ddc=file:monitor.bin + +#======================================================================= +# VOODOO: +# This defines the Voodoo Graphics emulation (experimental). Currently +# supported models are 'voodoo1', 'voodoo2', 'banshee' and 'voodoo3'. The +# Voodoo2 support is not yet complete, but almost usable. The Banshee and +# Voodoo3 support is under construction, but basically usable. The 2D/3D cards +# require an external VGA BIOS the vga extension option to be set to 'voodoo'. +# If the i440BX PCI chipset is selected, they can be assigned to AGP (slot #5). +# The gui screen update timing for all models is controlled by the related 'vga' +# options. # # Examples: -# vga: extension=cirrus, update_freq=10 +# voodoo: enabled=1, model=voodoo2 #======================================================================= -#vga: extension=vbe, update_freq=5 +#voodoo: enabled=1, model=voodoo1 + +#======================================================================= +# KEYBOARD: +# This defines parameters related to the emulated keyboard +# +# TYPE: +# Type of keyboard return by a "identify keyboard" command to the +# keyboard controller. It must be one of "xt", "at" or "mf". +# Defaults to "mf". It should be ok for almost everybody. A known +# exception is french macs, that do have a "at"-like keyboard. +# +# SERIAL_DELAY: +# Approximate time in microseconds that it takes one character to +# be transferred from the keyboard to controller over the serial path. +# +# PASTE_DELAY: +# Approximate time in microseconds between attempts to paste +# characters to the keyboard controller. This leaves time for the +# guest os to deal with the flow of characters. The ideal setting +# depends on how your operating system processes characters. The +# default of 100000 usec (.1 seconds) was chosen because it works +# consistently in Windows. 
+# If your OS is losing characters during a paste, increase the paste +# delay until it stops losing characters. +# +# KEYMAP: +# This enables a remap of a physical localized keyboard to a +# virtualized us keyboard, as the PC architecture expects. +# +# USER_SHORTCUT: +# This defines the keyboard shortcut to be sent when you press the "user" +# button in the headerbar. The shortcut string is a combination of maximum +# 3 key names (listed below) separated with a '-' character. +# Valid key names: +# "alt", "bksl", "bksp", "ctrl", "del", "down", "end", "enter", "esc", +# "f1", ... "f12", "home", "ins", "left", "menu", "minus", "pgdwn", "pgup", +# "plus", "power", "print", "right", "scrlck", "shift", "space", "tab", "up" +# and "win". + +# Examples: +# keyboard: type=mf, serial_delay=200, paste_delay=100000 +# keyboard: keymap=gui/keymaps/x11-pc-de.map +# keyboard: user_shortcut=ctrl-alt-del +#======================================================================= +#keyboard: type=mf, serial_delay=250 + +#======================================================================= +# MOUSE: +# This defines parameters for the emulated mouse type, the initial status +# of the mouse capture and the runtime method to toggle it. +# +# TYPE: +# With the mouse type option you can select the type of mouse to emulate. +# The default value is 'ps2'. The other choices are 'imps2' (wheel mouse +# on PS/2), 'serial', 'serial_wheel', 'serial_msys' (one com port requires +# setting 'mode=mouse') 'inport' and 'bus' (if present). To connect a mouse +# to a USB port, see the 'usb_uhci', 'usb_ohci', 'usb_ehci' or 'usb_xhci' +# options (requires PCI and USB support). +# +# ENABLED: +# The Bochs gui creates mouse "events" unless the 'enabled' option is +# set to 0. The hardware emulation itself is not disabled by this. +# Unless you have a particular reason for enabling the mouse by default, +# it is recommended that you leave it off. You can also toggle the mouse +# usage at runtime (RFB, SDL, Win32, wxWidgets and X11 - see below). +# +# TOGGLE: +# The default method to toggle the mouse capture at runtime is to press the +# CTRL key and the middle mouse button ('ctrl+mbutton'). This option allows +# to change the method to 'ctrl+f10' (like DOSBox), 'ctrl+alt' (like QEMU) +# or 'f12'. +# +# Examples: +# mouse: enabled=1 +# mouse: type=imps2, enabled=1 +# mouse: type=serial, enabled=1 +# mouse: enabled=0, toggle=ctrl+f10 +#======================================================================= +mouse: enabled=0 + +#======================================================================= +# PCI: +# This defines the parameters to set up the Bochs PCI emulation: +# +# ENABLED: +# If Bochs is compiled with PCI support, it is enabled by default. +# +# CHIPSET: +# Currently the chipsets i430FX, i440FX and i440BX (limited) are supported and +# the default is i440FX. +# +# SLOTx: +# It is possible to specify the devices connected to PCI slots. Up to 5 slots +# are available. For combined PCI/ISA devices assigning to slot is mandatory +# if the PCI model should be emulated (cirrus, ne2k and pcivga). Setting up +# slot for PCI-only devices is also supported, but they are auto-assigned if +# not specified (e1000, es1370, pcidev, pcipnic, usb_ehci, usb_ohci, usb_xhci, +# voodoo). All device models except the network devices ne2k and e1000 can be +# used only once in the slot configuration. In case of the i440BX chipset, the +# slot #5 is the AGP slot. Currently only the 'voodoo' device can be assigned +# to AGP. 
+# +# ADVOPTS: +# With the advanced PCI options it is possible to control the behaviour of the +# PCI chipset. These options can be specified as comma-separated values. +# By default the "Bochs i440FX" chipset enables the ACPI and HPET devices, but +# original i440FX doesn't support them. The options 'noacpi' and 'nohpet' make +# it possible to disable them. +# +# Example: +# pci: enabled=1, chipset=i440fx, slot1=pcivga, slot2=ne2k, advopts=noacpi +#======================================================================= +pci: enabled=1, chipset=i440fx + +#======================================================================= +# CLOCK: +# This defines the parameters of the clock inside Bochs: +# +# SYNC: +# This defines the method how to synchronize the Bochs internal time +# with realtime. With the value 'none' the Bochs time relies on the IPS +# value and no host time synchronization is used. The 'slowdown' method +# sacrifices performance to preserve reproducibility while allowing host +# time correlation. The 'realtime' method sacrifices reproducibility to +# preserve performance and host-time correlation. +# It is possible to enable both synchronization methods. +# +# RTC_SYNC: +# If this option is enabled together with the realtime synchronization, +# the RTC runs at realtime speed. This feature is disabled by default. +# +# TIME0: +# Specifies the start (boot) time of the virtual machine. Use a time +# value as returned by the time(2) system call or a string as returned +# by the ctime(3) system call. If no time0 value is set or if time0 +# equal to 1 (special case) or if time0 equal 'local', the simulation +# will be started at the current local host time. If time0 equal to 2 +# (special case) or if time0 equal 'utc', the simulation will be started +# at the current utc time. +# +# Syntax: +# clock: sync=[none|slowdown|realtime|both], time0=[timeValue|local|utc] +# +# Example: +# clock: sync=none, time0=local # Now (localtime) +# clock: sync=slowdown, time0=315529200 # Tue Jan 1 00:00:00 1980 +# clock: sync=none, time0="Mon Jan 1 00:00:00 1990" # 631148400 +# clock: sync=realtime, time0=938581955 # Wed Sep 29 07:12:35 1999 +# clock: sync=realtime, time0="Sat Jan 1 00:00:00 2000" # 946681200 +# clock: sync=none, time0=1 # Now (localtime) +# clock: sync=none, time0=utc # Now (utc/gmt) +# +# Default value are sync=none, rtc_sync=0, time0=local +#======================================================================= +#clock: sync=none, time0=local + +#======================================================================= +# CMOSIMAGE: +# This defines a binary image file with size 128 bytes that can be loaded into +# the CMOS RAM at startup. The rtc_init parameter controls whether initialize +# the RTC with values stored in the image. By default the time0 argument given +# to the clock option is used. With 'rtc_init=image' the image is the source +# for the initial time. +# +# Example: +# cmosimage: file=cmos.img, rtc_init=image +#======================================================================= +#cmosimage: file=cmos.img, rtc_init=time0 + +#======================================================================= +# private_colormap: Request that the GUI create and use it's own +# non-shared colormap. This colormap will be used +# when in the bochs window. If not enabled, a +# shared colormap scheme may be used. Not implemented +# on all GUI's. 
+# +# Examples: +# private_colormap: enabled=1 +# private_colormap: enabled=0 +#======================================================================= +private_colormap: enabled=0 #======================================================================= # FLOPPYA: @@ -450,14 +713,14 @@ ata3: enabled=0, ioaddr1=0x168, ioaddr2=0x360, irq=9 # # This defines the type and characteristics of all attached ata devices: # type= type of attached device [disk|cdrom] -# mode= only valid for disks [flat|concat|external|dll|sparse|vmware3] -# mode= only valid for disks [undoable|growing|volatile|vvfat] +# mode= only valid for disks [flat|concat|dll|sparse|vmware3|vmware4] +# [undoable|growing|volatile|vpc|vbox|vvfat] # path= path of the image / directory # cylinders= only valid for disks # heads= only valid for disks # spt= only valid for disks # status= only valid for cdroms [inserted|ejected] -# biosdetect= type of biosdetection [none|auto], only for disks on ata0 [cmos] +# biosdetect= type of biosdetection [auto|cmos|none] # translation=type of translation of the bios, only for disks [none|lba|large|rechs|auto] # model= string returned by identify device command # journal= optional filename of the redolog for undoable, volatile and vvfat disks @@ -519,48 +782,6 @@ ata3: enabled=0, ioaddr1=0x168, ioaddr2=0x360, irq=9 #boot: disk boot: network, floppy -#======================================================================= -# CLOCK: -# This defines the parameters of the clock inside Bochs: -# -# SYNC: -# This defines the method how to synchronize the Bochs internal time -# with realtime. With the value 'none' the Bochs time relies on the IPS -# value and no host time synchronization is used. The 'slowdown' method -# sacrifices performance to preserve reproducibility while allowing host -# time correlation. The 'realtime' method sacrifices reproducibility to -# preserve performance and host-time correlation. -# It is possible to enable both synchronization methods. -# -# RTC_SYNC: -# If this option is enabled together with the realtime synchronization, -# the RTC runs at realtime speed. This feature is disabled by default. -# -# TIME0: -# Specifies the start (boot) time of the virtual machine. Use a time -# value as returned by the time(2) system call. If no time0 value is -# set or if time0 equal to 1 (special case) or if time0 equal 'local', -# the simulation will be started at the current local host time. -# If time0 equal to 2 (special case) or if time0 equal 'utc', -# the simulation will be started at the current utc time. -# -# Syntax: -# clock: sync=[none|slowdown|realtime|both], time0=[timeValue|local|utc] -# -# Example: -# clock: sync=none, time0=local # Now (localtime) -# clock: sync=slowdown, time0=315529200 # Tue Jan 1 00:00:00 1980 -# clock: sync=none, time0=631148400 # Mon Jan 1 00:00:00 1990 -# clock: sync=realtime, time0=938581955 # Wed Sep 29 07:12:35 1999 -# clock: sync=realtime, time0=946681200 # Sat Jan 1 00:00:00 2000 -# clock: sync=none, time0=1 # Now (localtime) -# clock: sync=none, time0=utc # Now (utc/gmt) -# -# Default value are sync=none, time0=local -#======================================================================= -#clock: sync=none, time0=local - - #======================================================================= # FLOPPY_BOOTSIG_CHECK: disabled=[0|1] # Enables or disables the 0xaa55 signature check on boot floppies @@ -613,9 +834,11 @@ log: bochsout.txt # debug: messages useful only when debugging the code. This may # spit out thousands per second. 
# -# For events of each level, you can choose to exit Bochs ('fatal'), 'report' -# or 'ignore'. On some guis you have the additional choice 'ask'. A gui dialog -# appears asks how to proceed. +# For events of each level, you can choose to exit Bochs ('fatal'), 'ask', +# 'warn', 'report' or 'ignore'. The choices 'ask' and 'warn' are not supported +# by all guis, since they should bring up a dialog box. The 'warn' dialog is +# designed to confirm errors and the 'ask' dialog is usually used for panics +# and asks the user how to proceed. # # It is also possible to specify the 'action' to do for each Bochs facility # separately (e.g. crash on panics from everything except the cdrom, and only @@ -646,10 +869,10 @@ debugger_log: - #======================================================================= # COM1, COM2, COM3, COM4: -# This defines a serial port (UART type 16550A). In the 'term' you can specify -# a device to use as com1. This can be a real serial line, or a pty. To use -# a pty (under X/Unix), create two windows (xterms, usually). One of them will -# run bochs, and the other will act as com1. Find out the tty the com1 +# This defines a serial port (UART type 16550A). In the 'term' mode you can +# specify a device to use as com1. This can be a real serial line, or a pty. +# To use a pty (under X/Unix), create two windows (xterms, usually). One of +# them will run bochs, and the other will act as com1. Find out the tty the com1 # window using the `tty' command, and use that as the `dev' parameter. # Then do `sleep 1000000' in the com1 window to keep the shell from # messing with things, and run bochs in the other window. Serial I/O to @@ -660,13 +883,13 @@ debugger_log: - # opens socket/named pipe and waits until a client application connects to it # before starting simulation. This mode is useful for remote debugging (e.g. # with gdb's "target remote host:port" command or windbg's command line option -# -k com:pipe,port=\\.\pipe\pipename). Note: 'socket' is a shorthand for -# 'socket-client' and 'pipe' for 'pipe-client'. Socket modes use simple TCP -# communication, pipe modes use duplex byte mode pipes. +# -k com:pipe,port=\\.\pipe\pipename). Socket modes use simple TCP communication, +# pipe modes use duplex byte mode pipes. # Other serial modes are 'null' (no input/output), 'file' (output to a file -# specified as the 'dev' parameter), 'raw' (use the real serial port - under -# construction for win32), 'mouse' (standard serial mouse - requires -# mouse option setting 'type=serial', 'type=serial_wheel' or 'type=serial_msys'). +# specified as the 'dev' parameter and changeable at runtime), 'raw' (use the +# real serial port - partly implemented on win32), 'mouse' (standard serial +# mouse - requires mouse option setting 'type=serial', 'type=serial_wheel' or +# 'type=serial_msys'). # # Examples: # com1: enabled=1, mode=null @@ -687,7 +910,7 @@ debugger_log: - # defined the emulated printer port sends characters printed by the guest OS # into the output file. On some platforms a device filename can be used to # send the data to the real parallel port (e.g. "/dev/lp0" on Linux, "lpt1" on -# win32 platforms). +# win32 platforms). The output file can be changed at runtime. 
# # Examples: # parport1: enabled=1, file="parport.out" @@ -696,29 +919,77 @@ debugger_log: - #======================================================================= parport1: enabled=1, file="parport.out" +#======================================================================= +# SOUND: +# This defines the lowlevel sound driver(s) for the wave (PCM) input / output +# and the MIDI output feature and (if necessary) the devices to be used. +# It can have several of the following properties. +# All properties are in the format sound: property=value +# +# waveoutdrv: +# This defines the driver to be used for the waveout feature. +# Possible values are 'file' (all wave data sent to file), 'dummy' (no +# output) and the platform-dependant drivers 'alsa', 'oss', 'osx', 'sdl' +# and 'win'. +# waveout: +# This defines the device to be used for wave output (if necessary) or +# the output file for the 'file' driver. +# waveindrv: +# This defines the driver to be used for the wavein feature. +# Possible values are 'dummy' (recording silence) and platform-dependent +# drivers 'alsa', 'oss', 'sdl' and 'win'. +# wavein: +# This defines the device to be used for wave input (if necessary). +# midioutdrv: +# This defines the driver to be used for the MIDI output feature. +# Possible values are 'file' (all MIDI data sent to file), 'dummy' (no +# output) and platform-dependent drivers 'alsa', 'oss', 'osx' and 'win'. +# midiout: +# This defines the device to be used for MIDI output (if necessary). +# driver: +# This defines the driver to be used for all sound features with one +# property. Possible values are 'default' (platform default) and all +# other choices described above. Overriding one or more settings with +# the specific driver parameter is possible. +# +# Example for different drivers: +# sound: waveoutdrv=sdl, waveindrv=alsa, midioutdrv=dummy +#======================================================================= +#sound: driver=default, waveout=/dev/dsp. wavein=, midiout= + +#======================================================================= +# SPEAKER: +# This defines the PC speaker output mode. In the 'sound' mode the beep +# is generated by the square wave generator which is a part of the +# lowlevel sound support. In this mode the 'volume' parameter can be used +# to set the output volume (0 - 15). The 'system' mode is only available on +# Linux and Windows. On Linux /dev/console is used for output and on Windows +# the Beep() function. The 'gui' mode forwards the beep to the related +# gui methods (currently only used by the Carbon gui). +#======================================================================= +speaker: enabled=1, mode=system + #======================================================================= # SB16: # This defines the SB16 sound emulation. It can have several of the # following properties. # All properties are in the format sb16: property=value +# # enabled: # This optional property controls the presence of the SB16 emulation. # The emulation is turned on unless this property is used and set to 0. -# midi: The filename is where the midi data is sent. This can be a -# device or just a file if you want to record the midi data. -# midimode: -# 0=no data -# 1=output to device (system dependent. 
midi denotes the device driver) -# 2=SMF file output, including headers -# 3=output the midi data stream to the file (no midi headers and no -# delta times, just command and data bytes) -# wave: This is the device/file where wave output is stored -# wavemode: -# 0=no data -# 1=output to device (system dependent. wave denotes the device driver) -# 2=VOC file output, incl. headers -# 3=output the raw wave stream to the file -# log: The file to write the sb16 emulator messages to. +# midimode: This parameter specifies what to do with the MIDI output. +# 0 = no output +# 1 = output to device specified with the sound option (system dependent) +# 2 = MIDI or raw data output to file (depends on file name extension) +# 3 = dual output (mode 1 and 2 at the same time) +# midifile: This is the file where the midi output is stored (midimode 2 or 3). +# wavemode: This parameter specifies what to do with the PCM output. +# 0 = no output +# 1 = output to device specified with the sound option (system dependent) +# 2 = VOC, WAV or raw data output to file (depends on file name extension) +# 3 = dual output (mode 1 and 2 at the same time) +# wavefile: This is the file where the wave output is stored (wavemode 2 or 3). # loglevel: # 0=no log # 1=resource changes, midi program and bank changes @@ -726,125 +997,50 @@ parport1: enabled=1, file="parport.out" # 3=all errors # 4=all errors plus all port accesses # 5=all errors and port accesses plus a lot of extra info +# log: The file to write the sb16 emulator messages to. # dmatimer: # microseconds per second for a DMA cycle. Make it smaller to fix # non-continuous sound. 750000 is usually a good value. This needs a # reasonably correct setting for the IPS parameter of the CPU option. # -# Examples for output devices: -# sb16: midimode=1, midi="", wavemode=1, wave="" # win32 -# sb16: midimode=1, midi=alsa:128:0, wavemode=1, wave=alsa # Linux with ALSA +# Examples for output modes: +# sb16: midimode=2, midifile="output.mid", wavemode=1 # MIDI to file +# sb16: midimode=1, wavemode=3, wavefile="output.wav" # wave to file and device #======================================================================= -#sb16: midimode=1, midi=/dev/midi00, wavemode=1, wave=/dev/dsp, loglevel=2, log=sb16.log, dmatimer=600000 +#sb16: midimode=1, wavemode=1, loglevel=2, log=sb16.log, dmatimer=600000 #======================================================================= # ES1370: -# This defines the ES1370 sound emulation. The parameter 'enabled' controls the -# presence of the device. In addition to this, it must be loaded with 'plugin_ctrl' -# and assigned to a PCI slot. The 'wavedev' parameter is similar to the 'wave' -# parameter of the SB16 soundcard. The emulation supports recording and playback -# (except DAC1+DAC2 output at the same time). +# This defines the ES1370 sound emulation (recording and playback - except +# DAC1+DAC2 output at the same time). The parameter 'enabled' controls the +# presence of the device. The wave and MIDI output can be sent to device, file +# or both using the parameters 'wavemode', 'wavefile', 'midimode' and +# 'midifile'. See the description of these parameters at the SB16 directive. 
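For instance, a sketch combining the file and dual-output modes documented under the SB16 directive (the output file names here are placeholders):

    # es1370: enabled=1, wavemode=3, wavefile=output.wav, midimode=2, midifile=output.mid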
# # Examples: -# es1370: enabled=1, wavedev="" # win32 -# es1370: enabled=1, wavedev=alsa # Linux with ALSA +# es1370: enabled=1, wavemode=1 # use 'sound' parameters +# es1370: enabled=1, wavemode=2, wavefile=output.voc # send output to file #======================================================================= -#es1370: enabled=1, wavedev=alsa - -#======================================================================= -# KEYBOARD_SERIAL_DELAY: -# Approximate time in microseconds that it takes one character to -# be transfered from the keyboard to controller over the serial path. -# Examples: -# keyboard_serial_delay: 200 -#======================================================================= -keyboard_serial_delay: 250 - -#======================================================================= -# KEYBOARD_PASTE_DELAY: -# Approximate time in microseconds between attempts to paste -# characters to the keyboard controller. This leaves time for the -# guest os to deal with the flow of characters. The ideal setting -# depends on how your operating system processes characters. The -# default of 100000 usec (.1 seconds) was chosen because it works -# consistently in Windows. -# -# If your OS is losing characters during a paste, increase the paste -# delay until it stops losing characters. -# -# Examples: -# keyboard_paste_delay: 100000 -#======================================================================= -keyboard_paste_delay: 100000 - -#======================================================================= -# MOUSE: -# This defines parameters for the emulated mouse type, the initial status -# of the mouse capture and the runtime method to toggle it. -# -# TYPE: -# With the mouse type option you can select the type of mouse to emulate. -# The default value is 'ps2'. The other choices are 'imps2' (wheel mouse -# on PS/2), 'serial', 'serial_wheel' and 'serial_msys' (one com port requires -# setting 'mode=mouse'). To connect a mouse to an USB port, see the 'usb_uhci', -# 'usb_ohci' or 'usb_xhci' options (requires PCI and USB support). -# -# ENABLED: -# The Bochs gui creates mouse "events" unless the 'enabled' option is -# set to 0. The hardware emulation itself is not disabled by this. -# Unless you have a particular reason for enabling the mouse by default, -# it is recommended that you leave it off. You can also toggle the mouse -# usage at runtime (RFB, SDL, Win32, wxWidgets and X11 - see below). -# -# TOGGLE: -# The default method to toggle the mouse capture at runtime is to press the -# CTRL key and the middle mouse button ('ctrl+mbutton'). This option allows -# to change the method to 'ctrl+f10' (like DOSBox), 'ctrl+alt' (like QEMU) -# or 'f12' (replaces win32 'legacyF12' option). -# -# Examples: -# mouse: enabled=1 -# mouse: type=imps2, enabled=1 -# mouse: type=serial, enabled=1 -# mouse: enabled=0, toggle=ctrl+f10 -#======================================================================= -mouse: enabled=0 - -#======================================================================= -# private_colormap: Request that the GUI create and use it's own -# non-shared colormap. This colormap will be used -# when in the bochs window. If not enabled, a -# shared colormap scheme may be used. Not implemented -# on all GUI's. 
-# -# Examples: -# private_colormap: enabled=1 -# private_colormap: enabled=0 -#======================================================================= -private_colormap: enabled=0 - -#======================================================================= -# fullscreen: ONLY IMPLEMENTED ON AMIGA -# Request that Bochs occupy the entire screen instead of a -# window. -# -# Examples: -# fullscreen: enabled=0 -# fullscreen: enabled=1 -#======================================================================= -#fullscreen: enabled=0 -#screenmode: name="sample" +#es1370: enabled=1, wavemode=1 #======================================================================= # ne2k: NE2000 compatible ethernet adapter # # Format: -# ne2k: enabled=1, ioaddr=IOADDR, irq=IRQ, mac=MACADDR, ethmod=MODULE, -# ethdev=DEVICE, script=SCRIPT, bootrom=BOOTROM +# ne2k: card=CARD, enabled=1, type=TYPE, ioaddr=IOADDR, irq=IRQ, mac=MACADDR, +# ethmod=MODULE, ethdev=DEVICE, script=SCRIPT, bootrom=BOOTROM +# +# CARD: This is the zero-based card number to configure with this ne2k config +# line. Up to 4 devices are supported now (0...3). If not specified, the +# following parameters apply to card #0. +# +# TYPE: This is the card type to emulate ("isa" or "pci"). If not specified, +# card #0 defaults to "pci" if assigned to a pci slot. For the additional cards +# the type parameter should be set up. # # IOADDR, IRQ: You probably won't need to change ioaddr and irq, unless there # are IRQ conflicts. These arguments are ignored when assign the ne2k to a -# PCI slot. +# PCI slot or set the type to 'pci'. # # MAC: The MAC address MUST NOT match the address of any machine on the net. # Also, the first byte must be an even number (bit 0 set means a multicast @@ -857,6 +1053,8 @@ private_colormap: enabled=0 # Windows machines, you must run niclist to get the name of the ethdev. # Niclist source code is in misc/niclist.c and it is included in Windows # binary releases. +# The 'socket' module uses this parameter to specify the UDP port for +# receiving packets and (optional) the host to connect. # # SCRIPT: The script value is optional, and is the name of a script that # is executed after bochs initialize the network interface. You can use @@ -864,6 +1062,10 @@ private_colormap: enabled=0 # This is mainly useful for the tun/tap devices that only exist during # Bochs execution. The network interface name is supplied to the script # as first parameter. +# The 'slirp' module uses this parameter to specify a config file for +# setting up an alternative IP configuration or additional features. +# The 'vnet' module also uses this parameter to specify a config file similar +# to slirp, but with only a few settings. # # BOOTROM: The bootrom value is optional, and is the name of the ROM image # to load. Note that this feature is only implemented for the PCI version of @@ -873,11 +1075,14 @@ private_colormap: enabled=0 # you can use the following 'ethmod's to simulate a virtual network. # null: All packets are discarded, but logged to a few files. # vde: Virtual Distributed Ethernet -# vnet: ARP, ICMP-echo(ping), DHCP and read/write TFTP are simulated. +# vnet: ARP, ICMP-echo(ping), DHCP, DNS, FTP and TFTP are simulated. # The virtual host uses 192.168.10.1. -# DHCP assigns 192.168.10.2 to the guest. -# TFTP uses the 'ethdev' value for the root directory and doesn't -# overwrite files. +# DHCP assigns 192.168.10.15 to the guest. +# FTP/TFTP using the 'ethdev' value for the root directory. 
+# TFTP doesn't overwrite files, DNS for server and client only. +# socket: Connect up to 6 Bochs instances with external program 'bxhub' +# (simulating an ethernet hub). It provides the same services as the +# 'vnet' module and assigns IP addresses like 'slirp' (10.0.2.x). # #======================================================================= # ne2k: ioaddr=0x300, irq=9, mac=fe:fd:00:00:00:01, ethmod=fbsd, ethdev=en0 #macosx @@ -889,111 +1094,62 @@ private_colormap: enabled=0 # ne2k: ioaddr=0x300, irq=9, mac=b0:c4:20:00:00:01, ethmod=null, ethdev=eth0 # ne2k: ioaddr=0x300, irq=9, mac=b0:c4:20:00:00:01, ethmod=vde, ethdev="/tmp/vde.ctl" # ne2k: ioaddr=0x300, irq=9, mac=b0:c4:20:00:00:01, ethmod=vnet, ethdev="c:/temp" -# ne2k: mac=b0:c4:20:00:00:01, ethmod=slirp, script=/usr/local/bin/slirp, bootrom=ne2k_pci.rom +# ne2k: mac=b0:c4:20:00:00:01, ethmod=socket, ethdev=40000 # use localhost +# ne2k: mac=b0:c4:20:00:00:01, ethmod=socket, ethdev=mymachine:40000 +# ne2k: mac=b0:c4:20:00:00:01, ethmod=slirp, script=slirp.conf, bootrom=ne2k_pci.rom #======================================================================= -# pnic: Bochs/Etherboot pseudo-NIC +# pcipnic: Bochs/Etherboot pseudo-NIC # # Format: -# pnic: enabled=1, mac=MACADDR, ethmod=MODULE, ethdev=DEVICE, script=SCRIPT, -# bootrom=BOOTROM +# pcipnic: enabled=1, mac=MACADDR, ethmod=MODULE, ethdev=DEVICE, script=SCRIPT, +# bootrom=BOOTROM # # The pseudo-NIC accepts the same syntax (for mac, ethmod, ethdev, script, # bootrom) and supports the same networking modules as the NE2000 adapter. -# In addition to this, it must be loaded with 'plugin_ctrl' and assigned -# to a PCI slot. #======================================================================= -#pnic: enabled=1, mac=b0:c4:20:00:00:00, ethmod=vnet +#pcipnic: enabled=1, mac=b0:c4:20:00:00:00, ethmod=vnet #======================================================================= # e1000: Intel(R) 82540EM Gigabit Ethernet adapter # # Format: -# e1000: enabled=1, mac=MACADDR, ethmod=MODULE, ethdev=DEVICE, script=SCRIPT -# bootrom=BOOTROM +# e1000: card=CARD, enabled=1, mac=MACADDR, ethmod=MODULE, ethdev=DEVICE, +# script=SCRIPT, bootrom=BOOTROM # -# The E1000 accepts the same syntax (for mac, ethmod, ethdev, script, bootrom) -# and supports the same networking modules as the NE2000 adapter. In addition -# to this, it must be loaded with 'plugin_ctrl' and assigned to a PCI slot. +# The E1000 accepts the same syntax (for card, mac, ethmod, ethdev, script, +# bootrom) and supports the same networking modules as the NE2000 adapter. +# It also supports up to 4 devices selected with the card parameter. #======================================================================= -#e1000: enabled=1, mac=52:54:00:12:34:56, ethmod=slirp, script=/usr/local/bin/slirp +#e1000: enabled=1, mac=52:54:00:12:34:56, ethmod=slirp, script=slirp.conf e1000: enabled=1, mac=52:54:00:12:34:56, ethmod=tuntap, ethdev=/dev/net/tun:tap0 -#======================================================================= -# KEYBOARD_MAPPING: -# This enables a remap of a physical localized keyboard to a -# virtualized us keyboard, as the PC architecture expects. -# If enabled, the keymap file must be specified. 
-# -# Examples: -# keyboard_mapping: enabled=1, map=gui/keymaps/x11-pc-de.map -#======================================================================= -keyboard_mapping: enabled=0, map= - -#======================================================================= -# KEYBOARD_TYPE: -# Type of keyboard return by a "identify keyboard" command to the -# keyboard controler. It must be one of "xt", "at" or "mf". -# Defaults to "mf". It should be ok for almost everybody. A known -# exception is french macs, that do have a "at"-like keyboard. -# -# Examples: -# keyboard_type: mf -#======================================================================= -#keyboard_type: mf - -#======================================================================= -# USER_SHORTCUT: -# This defines the keyboard shortcut to be sent when you press the "user" -# button in the headerbar. The shortcut string is a combination of maximum -# 3 key names (listed below) separated with a '-' character. -# Valid key names: -# "alt", "bksl", "bksp", "ctrl", "del", "down", "end", "enter", "esc", -# "f1", ... "f12", "home", "ins", "left", "menu", "minus", "pgdwn", "pgup", -# "plus", "right", "shift", "space", "tab", "up", "win", "print" and "power". -# -# Example: -# user_shortcut: keys=ctrl-alt-del -#======================================================================= -#user_shortcut: keys=ctrl-alt-del - -#======================================================================= -# PCI: -# This option controls the presence of a PCI chipset in Bochs. Currently it only -# supports the i440FX chipset. You can also specify the devices connected to -# PCI slots. Up to 5 slots are available. These devices are currently supported: -# cirrus, e1000, es1370, ne2k, pcivga, pcidev, pcipnic, usb_ohci and usb_xhci. -# -# Example: -# pci: enabled=1, chipset=i440fx, slot1=pcivga, slot2=ne2k -#======================================================================= -pci: enabled=1, chipset=i440fx, slot1=e1000 - #======================================================================= # USB_UHCI: # This option controls the presence of the USB root hub which is a part # of the i440FX PCI chipset. With the portX parameter you can connect devices -# to the hub (currently supported: 'mouse', 'tablet', 'keypad', 'disk', 'cdrom' -# 'hub' and 'printer'). NOTE: UHCI must be loaded with 'plugin_ctrl'. -# -# The optionsX parameter can be used to assign specific options to the device -# connected to the corresponding USB port. Currently this feature is only used -# to set the speed reported by device and by the 'disk' device to specify -# an alternative redolog file of some image modes. +# to the hub (currently supported: 'mouse', 'tablet', 'keypad', 'keyboard', +# 'disk', 'cdrom', 'floppy', 'hub' and 'printer'). # # If you connect the mouse or tablet to one of the ports, Bochs forwards the # mouse movement data to the USB device instead of the selected mouse type. # When connecting the keypad to one of the ports, Bochs forwards the input of -# the numeric keypad to the USB device instead of the PS/2 keyboard. +# the numeric keypad to the USB device instead of the PS/2 keyboard. If the +# keyboard is selected, all key events are sent to the USB device. # -# To connect a 'flat' mode image as an USB hardisk you can use the 'disk' device +# To connect a 'flat' mode image as a USB hardisk you can use the 'disk' device # with the path to the image separated with a colon. 
To use other disk image modes # similar to ATA disks the syntax 'disk:mode:filename' must be used (see below). # -# To emulate an USB cdrom you can use the 'cdrom' device name and the path to +# To emulate a USB cdrom you can use the 'cdrom' device name and the path to # an ISO image or raw device name also separated with a colon. An option to # insert/eject media is available in the runtime configuration. # +# To emulate a USB floppy you can use the 'floppy' device with the path to the +# image separated with a colon. To use the VVFAT image mode similar to the +# legacy floppy the syntax 'floppy:vvfat:directory' must be used (see below). +# An option to insert/eject media is available in the runtime configuration. +# # The device name 'hub' connects an external hub with max. 8 ports (default: 4) # to the root hub. To specify the number of ports you have to add the value # separated with a colon. Connecting devices to the external hub ports is only @@ -1001,45 +1157,77 @@ pci: enabled=1, chipset=i440fx, slot1=e1000 # # The device 'printer' emulates the HP Deskjet 920C printer. The PCL data is # sent to a file specified in bochsrc.txt. The current code appends the PCL -# code to the file if the file already existed. It would probably be nice to -# overwrite the file instead, asking user first. +# code to the file if the file already existed. The output file can be +# changed at runtime. +# +# The optionsX parameter can be used to assign specific options to the device +# connected to the corresponding USB port. Currently this feature is used to +# set the speed reported by device ('low', 'full', 'high' or 'super'). The +# available speed choices depend on both HC and device. The option 'debug' turns +# on debug output for the device at connection time. +# For the USB 'disk' device the optionsX parameter can be used to specify an +# alternative redolog file (journal) of some image modes. For 'vvfat' mode USB +# disks the optionsX parameter can be used to specify the disk size (range +# 128M ... 128G). If the size is not specified, it defaults to 504M. +# For the USB 'floppy' device the optionsX parameter can be used to specify an +# alternative device ID to be reported. Currently only the model "teac" is +# supported (can fix hw detection in some guest OS). The USB floppy also +# accepts the parameter "write_protected" with valid values 0 and 1 to select +# the access mode (default is 0). #======================================================================= #usb_uhci: enabled=1 #usb_uhci: enabled=1, port1=mouse, port2=disk:usbstick.img #usb_uhci: enabled=1, port1=hub:7, port2=disk:growing:usbdisk.img -#usb_uhci: enabled=1, port2=disk:undoable:usbdisk.img, options1=journal:redo.log +#usb_uhci: enabled=1, port2=disk:undoable:usbdisk.img, options2=journal:redo.log +#usb_uhci: enabled=1, port2=disk:usbdisk2.img, options2=sect_size:1024 +#usb_uhci: enabled=1, port2=disk:vvfat:vvfat, options2="debug,speed:full" #usb_uhci: enabled=1, port1=printer:printdata.bin, port2=cdrom:image.iso +#usb_uhci: enabled=1, port2=floppy:vvfat:diskette, options2="model:teac" #======================================================================= # USB_OHCI: # This option controls the presence of the USB OHCI host controller with a -# 2-port hub. The portX option accepts the same device types with the same -# syntax as the UHCI controller (see above). The OHCI HC must be assigned to -# a PCI slot and loaded with 'plugin_ctrl'. +# 2-port hub. 
The portX parameter accepts the same device types with the same +# syntax as the UHCI controller (see above). The optionsX parameter is also +# available on OHCI. #======================================================================= #usb_ohci: enabled=1 #usb_ohci: enabled=1, port1=printer:usbprinter.bin +#======================================================================= +# USB_EHCI: +# This option controls the presence of the USB EHCI host controller with a +# 6-port hub. The portX parameter accepts the same device types with the +# same syntax as the UHCI controller (see above). The optionsX parameter is +# also available on EHCI. +#======================================================================= +#usb_ehci: enabled=1 + #======================================================================= # USB_XHCI: -# This option controls the presence of the experimental USB xHCI host controller -# with a 4-port hub. The portX option accepts the same device types with the -# same syntax as the UHCI controller (see above). The xHCI HC must be assigned -# to a PCI slot and loaded with 'plugin_ctrl'. +# This option controls the presence of the USB xHCI host controller with a +# 4-port hub. The portX parameter accepts the same device types with the +# same syntax as the UHCI controller (see above). The optionsX parameter is +# also available on xHCI. NOTE: port 1 and 2 are USB3 and only support +# super-speed devices, but port 3 and 4 are USB2 and support speed settings +# low, full and high. #======================================================================= #usb_xhci: enabled=1 #======================================================================= -# CMOSIMAGE: -# This defines image file that can be loaded into the CMOS RAM at startup. -# The rtc_init parameter controls whether initialize the RTC with values stored -# in the image. By default the time0 argument given to the clock option is used. -# With 'rtc_init=image' the image is the source for the initial time. -# -# Example: -# cmosimage: file=cmos.img, rtc_init=image +# PCIDEV: +# PCI host device mapping +# WARNING: This Bochs feature is not maintained yet and may fail. #======================================================================= -#cmosimage: file=cmos.img, rtc_init=time0 +#pcidev: vendor=0x1234, device=0x5678 + +#======================================================================= +# GDBSTUB: +# Enable GDB stub. See user documentation for details. +# Default value is enabled=0. +# WARNING: This Bochs feature is not maintained yet and may fail. +#======================================================================= +#gdbstub: enabled=0, port=1234, text_base=0, data_base=0, bss_base=0 #======================================================================= # MAGIC_BREAK: @@ -1050,9 +1238,22 @@ pci: enabled=1, chipset=i440fx, slot1=e1000 # Example: # magic_break: enabled=1 #======================================================================= -#magic_break: enabled=1 magic_break: enabled=1 +#======================================================================= +# DEBUG_SYMBOLS: +# This loads symbols from the specified file for use in Bochs' internal +# debugger. Symbols are loaded into global context. This is equivalent to +# issuing ldsym debugger command at start up. 
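+# (Note: the optional offset below is presumably added to each symbol address
+# read from the file, which is handy for higher-half kernels linked at a
+# virtual base such as 0x80000000.)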
+# +# Example: +# debug_symbols: file="kernel.sym" +# debug_symbols: file="kernel.sym", offset=0x80000000 +#======================================================================= +#debug_symbols: file="kernel.sym" + +#print_timestamps: enabled=1 + #======================================================================= # PORT_E9_HACK: # The 0xE9 port doesn't exists in normal ISA architecture. However, we @@ -1069,43 +1270,16 @@ magic_break: enabled=1 port_e9_hack: enabled=1 #======================================================================= -# DEBUG_SYMBOLS: -# This loads symbols from the specified file for use in Bochs' internal -# debugger. Symbols are loaded into global context. This is equivalent to -# issuing ldsym debugger command at start up. +# fullscreen: ONLY IMPLEMENTED ON AMIGA +# Request that Bochs occupy the entire screen instead of a +# window. # -# Example: -# debug_symbols: file="kernel.sym" -# debug_symbols: file="kernel.sym", offset=0x80000000 +# Examples: +# fullscreen: enabled=0 +# fullscreen: enabled=1 #======================================================================= -#debug_symbols: file="kernel.sym" - -#======================================================================= -# other stuff -#======================================================================= -#load32bitOSImage: os=nullkernel, path=../kernel.img, iolog=../vga_io.log -#load32bitOSImage: os=linux, path=../linux.img, iolog=../vga_io.log, initrd=../initrd.img -#print_timestamps: enabled=1 - -#------------------------- -# PCI host device mapping -#------------------------- -#pcidev: vendor=0x1234, device=0x5678 - -#======================================================================= -# GDBSTUB: -# Enable GDB stub. See user documentation for details. -# Default value is enabled=0. -#======================================================================= -#gdbstub: enabled=0, port=1234, text_base=0, data_base=0, bss_base=0 - -#======================================================================= -# USER_PLUGIN: -# Load user-defined plugin. This option is available only if Bochs is -# compiled with plugin support. Maximum 8 different plugins are supported. -# See the example in the Bochs sources how to write a plugin device. 
-#======================================================================= -#user_plugin: name=testdev +#fullscreen: enabled=0 +#screenmode: name="sample" #======================================================================= # for Macintosh, use the style of pathnames in the following diff --git a/src/Makefile b/src/Makefile index 039538218..9ea300a5f 100644 --- a/src/Makefile +++ b/src/Makefile @@ -10,6 +10,7 @@ LDFLAGS := HOST_CFLAGS := MAKEDEPS := Makefile CROSS_COMPILE ?= $(CROSS) +SYMBOL_PREFIX := ############################################################################### # @@ -48,7 +49,6 @@ ELF2EFI32 := ./util/elf2efi32 ELF2EFI64 := ./util/elf2efi64 EFIROM := ./util/efirom EFIFATBIN := ./util/efifatbin -ICCFIX := ./util/iccfix EINFO := ./util/einfo GENKEYMAP := ./util/genkeymap.pl DOXYGEN := doxygen @@ -74,6 +74,7 @@ SRCDIRS += drivers/net/phantom SRCDIRS += drivers/net/vxge SRCDIRS += drivers/net/efi SRCDIRS += drivers/net/tg3 +SRCDIRS += drivers/net/bnxt SRCDIRS += drivers/net/sfc SRCDIRS += drivers/block SRCDIRS += drivers/nvs diff --git a/src/Makefile.efi b/src/Makefile.efi index 151b33186..bd479b3da 100644 --- a/src/Makefile.efi +++ b/src/Makefile.efi @@ -1,5 +1,13 @@ # -*- makefile -*- : Force emacs to use Makefile mode +# Enable stack protection if available +# +SPG_TEST = $(CC) -fstack-protector-strong -mstack-protector-guard=global \ + -x c -c /dev/null -o /dev/null >/dev/null 2>&1 +SPG_FLAGS := $(shell $(SPG_TEST) && $(ECHO) '-fstack-protector-strong ' \ + '-mstack-protector-guard=global') +CFLAGS += $(SPG_FLAGS) + # The EFI linker script # LDSCRIPT = scripts/efi.lds @@ -35,12 +43,13 @@ $(BIN)/%.drv.efi : $(BIN)/%.efidrv $(BIN)/%.efirom : $(BIN)/%.efidrv $(EFIROM) $(QM)$(ECHO) " [FINISH] $@" - $(Q)$(EFIROM) -v $(TGT_PCI_VENDOR) -d $(TGT_PCI_DEVICE) $< $@ + $(Q)$(EFIROM) -v $(firstword $(TGT_PCI_VENDOR) 0) \ + -d $(firstword $(TGT_PCI_DEVICE) 0) -c $< $@ $(BIN)/efidrv.cab : $(BIN)/alldrv.efis # $(ALL_drv.efi) is not yet defined $(QM)$(ECHO) " [CAB] $@" $(Q)$(LCAB) -n -q $(ALL_drv.efi) $@ -$(BIN)/%.usb : $(BIN)/%.efi - $(QM)$(ECHO) " [GENEFIDSK] $@" - $(Q)bash util/genefidsk -o $@ -b $(EFI_BOOT_FILE) $< +$(BIN)/%.iso $(BIN)/%.usb : $(BIN)/%.efi util/genfsimg + $(QM)$(ECHO) " [GENFSIMG] $@" + $(Q)util/genfsimg -o $@ $< diff --git a/src/Makefile.housekeeping b/src/Makefile.housekeeping index 1dd147949..0deb15c10 100644 --- a/src/Makefile.housekeeping +++ b/src/Makefile.housekeeping @@ -76,9 +76,7 @@ CCDEFS := $(shell $(CC) -E -x c -c /dev/null -dM | cut -d" " -f2) ccdefs: @$(ECHO) $(CCDEFS) -ifeq ($(filter __ICC,$(CCDEFS)),__ICC) -CCTYPE := icc -else +ifeq ($(filter __GNUC__,$(CCDEFS)),__GNUC__) CCTYPE := gcc endif cctype: @@ -113,6 +111,13 @@ $(warning Use GNU ld instead) $(error Unsuitable build environment found) endif +OBJCOPY_ETC_BANNER := $(shell $(OBJCOPY) --version | grep 'elftoolchain') +ifneq ($(OBJCOPY_ETC_BANNER),) +$(warning The elftoolchain objcopy is unsuitable for building iPXE) +$(warning Use binutils objcopy instead) +$(error Unsuitable build environment found) +endif + ############################################################################### # # Check if $(eval ...) is available to use @@ -146,17 +151,6 @@ define NEWLINE endef -# Some widespread patched versions of gcc include -fstack-protector by -# default, even when -ffreestanding is specified. We therefore need -# to disable -fstack-protector if the compiler supports it. 
-# -ifeq ($(CCTYPE),gcc) -SP_TEST = $(CC) -fno-stack-protector -x c -c /dev/null \ - -o /dev/null >/dev/null 2>&1 -SP_FLAGS := $(shell $(SP_TEST) && $(ECHO) '-fno-stack-protector') -WORKAROUND_CFLAGS += $(SP_FLAGS) -endif - # gcc 4.4 generates .eh_frame sections by default, which distort the # output of "size". Inhibit this. # @@ -353,7 +347,7 @@ arch : # Determine build platform DEFAULT_PLATFORM := pcbios PLATFORM := $(firstword $(BIN_PLATFORM) $(DEFAULT_PLATFORM)) -CFLAGS += -DPLATFORM=$(PLATFORM) +CFLAGS += -DPLATFORM=$(PLATFORM) -DPLATFORM_$(PLATFORM) platform : @$(ECHO) $(PLATFORM) @@ -378,6 +372,43 @@ INCDIRS += arch/$(ARCH)/include INCDIRS += arch/$(ARCH)/include/$(PLATFORM) endif +############################################################################### +# +# Especially ugly workarounds + +# Some widespread patched versions of gcc include -fPIE -Wl,-pie by +# default. Note that gcc will exit *successfully* if it fails to +# recognise an option that starts with "no", so we have to test for +# output on stderr instead of checking the exit status. +# +# Current versions of gcc require -no-pie; older versions require +# -nopie. We therefore test for both. +# +# This workaround must be determined only after the +# architecture-specific Makefile has been included, since some +# platforms (e.g. bin-x86_64-efi) will explicitly require the use of +# -fpie. +# +ifeq ($(filter -fpie,$(CFLAGS)),) +ifeq ($(CCTYPE),gcc) +PIE_TEST = [ -z "`$(CC) -fno-PIE -no-pie -x c -c /dev/null -o /dev/null 2>&1`" ] +PIE_FLAGS := $(shell $(PIE_TEST) && $(ECHO) '-fno-PIE -no-pie') +PIE_TEST2 = [ -z "`$(CC) -fno-PIE -nopie -x c -c /dev/null -o /dev/null 2>&1`" ] +PIE_FLAGS2 := $(shell $(PIE_TEST2) && $(ECHO) '-fno-PIE -nopie') +WORKAROUND_CFLAGS += $(PIE_FLAGS) $(PIE_FLAGS2) +endif +endif + +# Some widespread patched versions of gcc include -fcf-protection=full +# by default. +# +ifeq ($(CCTYPE),gcc) +CFP_TEST = $(CC) -fcf-protection=none -x c -c /dev/null -o /dev/null \ + >/dev/null 2>&1 +CFP_FLAGS := $(shell $(CFP_TEST) && $(ECHO) '-fcf-protection=none') +WORKAROUND_CFLAGS += $(CFP_FLAGS) +endif + ############################################################################### # # Source file handling @@ -415,6 +446,13 @@ ifdef BIN incdirs : @$(ECHO) $(INCDIRS) +# Inhibit -fstack-protector (which is implicitly enabled in some +# patched gcc versions) unless explicitly mentioned in CFLAGS. 
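+# (For example, Makefile.efi adds -fstack-protector-strong to CFLAGS when the
+# compiler supports it, so this check presumably leaves stack protection
+# enabled for EFI builds while other platforms still get -fno-stack-protector.)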
+# +ifeq ($(findstring -fstack-protector,$(CFLAGS)),) +CFLAGS += -fno-stack-protector +endif + # Common flags # CFLAGS += $(foreach INC,$(INCDIRS),-I$(INC)) @@ -422,35 +460,10 @@ CFLAGS += -Os CFLAGS += -g ifeq ($(CCTYPE),gcc) CFLAGS += -ffreestanding +CFLAGS += -fcommon CFLAGS += -Wall -W -Wformat-nonliteral HOST_CFLAGS += -Wall -W -Wformat-nonliteral endif -ifeq ($(CCTYPE),icc) -CFLAGS += -fno-builtin -CFLAGS += -no-ip -CFLAGS += -no-gcc -CFLAGS += -diag-disable 111 # Unreachable code -CFLAGS += -diag-disable 128 # Unreachable loop -CFLAGS += -diag-disable 170 # Array boundary checks -CFLAGS += -diag-disable 177 # Unused functions -CFLAGS += -diag-disable 181 # printf() format checks -CFLAGS += -diag-disable 188 # enum strictness -CFLAGS += -diag-disable 193 # Undefined preprocessor identifiers -CFLAGS += -diag-disable 280 # switch ( constant ) -CFLAGS += -diag-disable 310 # K&R parameter lists -CFLAGS += -diag-disable 424 # Extra semicolon -CFLAGS += -diag-disable 589 # Declarations mid-code -CFLAGS += -diag-disable 593 # Unused variables -CFLAGS += -diag-disable 810 # Casting ints to smaller ints -CFLAGS += -diag-disable 981 # Sequence point violations -CFLAGS += -diag-disable 1292 # Ignored attributes -CFLAGS += -diag-disable 1338 # void pointer arithmetic -CFLAGS += -diag-disable 1361 # Variable-length arrays -CFLAGS += -diag-disable 1418 # Missing prototypes -CFLAGS += -diag-disable 1419 # Missing prototypes -CFLAGS += -diag-disable 1599 # Hidden variables -CFLAGS += -Wall -Wmissing-declarations -endif CFLAGS += $(WORKAROUND_CFLAGS) $(EXTRA_CFLAGS) ASFLAGS += $(WORKAROUND_ASFLAGS) $(EXTRA_ASFLAGS) LDFLAGS += $(WORKAROUND_LDFLAGS) $(EXTRA_LDFLAGS) @@ -464,35 +477,6 @@ ASFLAGS += --fatal-warnings HOST_CFLAGS += -Werror endif -# Function trace recorder state in the last build. This is needed -# in order to correctly rebuild whenever the function recorder is -# enabled/disabled. -# -FNREC_STATE := $(BIN)/.fnrec.state -ifeq ($(wildcard $(FNREC_STATE)),) -FNREC_OLD := -else -FNREC_OLD := $(shell cat $(FNREC_STATE)) -endif -ifeq ($(FNREC_OLD),$(FNREC)) -$(FNREC_STATE) : -else -$(FNREC_STATE) : clean -$(shell $(ECHO) "$(FNREC)" > $(FNREC_STATE)) -endif - -VERYCLEANUP += $(FNREC_STATE) -MAKEDEPS += $(FNREC_STATE) - -ifeq ($(FNREC),1) -# Enabling -finstrument-functions affects gcc's analysis and leads to spurious -# warnings about use of uninitialised variables. -# -CFLAGS += -Wno-uninitialized -CFLAGS += -finstrument-functions -CFLAGS += -finstrument-functions-exclude-file-list=core/fnrec.c -endif - # Enable per-item sections and section garbage collection. Note that # some older versions of gcc support -fdata-sections but treat it as # implying -fno-common, which would break our build. Some other older @@ -543,16 +527,6 @@ OBJ_CFLAGS = $(CFLAGS_$(OBJECT)) -DOBJECT=$(subst -,_,$(OBJECT)) $(BIN)/%.flags : @$(ECHO) $(OBJ_CFLAGS) -# ICC requires postprocessing objects to fix up table alignments -# -ifeq ($(CCTYPE),icc) -POST_O = && $(ICCFIX) $@ -POST_O_DEPS := $(ICCFIX) -else -POST_O := -POST_O_DEPS := -endif - # Debug level calculations # DBGLVL_MAX = -DDBGLVL_MAX=$(firstword $(subst ., ,$(1))) @@ -562,9 +536,9 @@ DBGLVL = $(call DBGLVL_MAX,$(1)) $(call DBGLVL_DFLT,$(1)) # Rules for specific object types. 
# COMPILE_c = $(CC) $(CFLAGS) $(CFLAGS_c) $(OBJ_CFLAGS) -RULE_c = $(Q)$(COMPILE_c) -c $< -o $@ $(POST_O) +RULE_c = $(Q)$(COMPILE_c) -c $< -o $@ RULE_c_to_ids.o = $(Q)$(ECHO_E) '$(OBJ_IDS_ASM_NL)' | $(ASSEMBLE_S) -o $@ -RULE_c_to_dbg%.o= $(Q)$(COMPILE_c) $(call DBGLVL,$*) -c $< -o $@ $(POST_O) +RULE_c_to_dbg%.o= $(Q)$(COMPILE_c) $(call DBGLVL,$*) -c $< -o $@ RULE_c_to_c = $(Q)$(COMPILE_c) -E -c $< > $@ RULE_c_to_s = $(Q)$(COMPILE_c) -S -g0 -c $< -o $@ @@ -804,6 +778,38 @@ include/ipxe/profile.h : $(PROFILE_LIST) .PRECIOUS : include/ipxe/profile.h +# (Single-element) list of function recorder configuration +# +FNREC_LIST := $(BIN)/.fnrec.list +ifeq ($(wildcard $(FNREC_LIST)),) +FNREC_OLD := +else +FNREC_OLD := $(shell cat $(FNREC_LIST)) +endif +ifneq ($(FNREC_OLD),$(FNREC)) +$(shell $(ECHO) "$(FNREC)" > $(FNREC_LIST)) +endif + +$(FNREC_LIST) : $(MAKEDEPS) + +VERYCLEANUP += $(FNREC_LIST) + +# Function recorder configuration +# +ifeq ($(FNREC),1) +# Enabling -finstrument-functions affects gcc's analysis and leads to spurious +# warnings about use of uninitialised variables. +# +CFLAGS += -Wno-uninitialized +CFLAGS += -finstrument-functions +CFLAGS += -finstrument-functions-exclude-file-list=core/fnrec.c +endif + +include/compiler.h : $(FNREC_LIST) + $(Q)$(TOUCH) $@ + +.PRECIOUS : include/compiler.h + # These files use .incbin inline assembly to include a binary file. # Unfortunately ccache does not detect this dependency and caches # builds even when the binary file has changed. @@ -859,7 +865,7 @@ define deps_template_parts @$(MKDIR) -p $(BIN)/deps/$(dir $(1)) $(Q)$(CPP) $(CFLAGS) $(CFLAGS_$(2)) $(CFLAGS_$(3)) -DOBJECT=$(3) \ -Wno-error -M $(1) -MG -MP | \ - sed 's/\.o\s*:/_DEPS +=/' > $(BIN)/deps/$(1).d + sed 's/\.o[[:blank:]]*:/_DEPS +=/' > $(BIN)/deps/$(1).d endef # rules_template : generate rules for a given source file @@ -875,7 +881,7 @@ endef # $(3) is the source base name (e.g. "rtl8139") # define rules_template_parts -$$(BIN)/$(3).o : $(1) $$(MAKEDEPS) $$(POST_O_DEPS) $$($(3)_DEPS) +$$(BIN)/$(3).o : $(1) $$(MAKEDEPS) $$($(3)_DEPS) $$(QM)$(ECHO) " [BUILD] $$@" $$(RULE_$(2)) BOBJS += $$(BIN)/$(3).o @@ -890,7 +896,7 @@ endef # $(4) is the destination type (e.g. "dbg%.o") # define rules_template_target -$$(BIN)/$(3).$(4) : $(1) $$(MAKEDEPS) $$(POST_O_DEPS) $$($(3)_DEPS) +$$(BIN)/$(3).$(4) : $(1) $$(MAKEDEPS) $$($(3)_DEPS) $$(QM)$(ECHO) " [BUILD] $$@" $$(RULE_$(2)_to_$(4)) $(TGT)_OBJS += $$(BIN)/$(3).$(4) @@ -1096,9 +1102,10 @@ TGT_LD_ENTRY = _$(TGT_PREFIX)_start # TGT_LD_FLAGS = $(foreach SYM,$(TGT_LD_ENTRY) $(TGT_LD_DRIVERS) \ $(TGT_LD_DEVLIST) obj_config obj_config_$(PLATFORM),\ - -u $(SYM) --defsym check_$(SYM)=$(SYM) ) \ + -u $(SYMBOL_PREFIX)$(SYM) \ + --defsym check_$(SYM)=$(SYMBOL_PREFIX)$(SYM) ) \ $(patsubst %,--defsym %,$(TGT_LD_IDS)) \ - -e $(TGT_LD_ENTRY) + -e $(SYMBOL_PREFIX)$(TGT_LD_ENTRY) # Calculate list of debugging versions of objects to be included in # the target. @@ -1159,18 +1166,38 @@ BLIB = $(BIN)/blib.a $(BLIB) : $(BLIB_OBJS) $(BLIB_LIST) $(MAKEDEPS) $(Q)$(RM) $(BLIB) $(QM)$(ECHO) " [AR] $@" - $(Q)$(AR) r $@ $(sort $(BLIB_OBJS)) - $(Q)$(RANLIB) $@ + $(Q)$(AR) rD $@ $(sort $(BLIB_OBJS)) + $(Q)$(OBJCOPY) --enable-deterministic-archives \ + --prefix-symbols=$(SYMBOL_PREFIX) $@ + $(Q)$(RANLIB) -D $@ blib : $(BLIB) # Command to generate build ID. Must be unique for each $(BIN)/%.tmp, # even within the same build run. 
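+# (The checksum-based command below derives the build ID from the content of
+# the link inputs rather than from a random number, so the ID is reproducible
+# for identical inputs, presumably in keeping with the deterministic archive
+# flags and SOURCE_DATE_EPOCH support introduced elsewhere in this build.)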
# -BUILD_ID_CMD := perl -e 'printf "0x%08x", int ( rand ( 0xffffffff ) );' +# The build ID is supposed to be collision-free across all ROMs that +# might ever end up installed in the same system. It doesn't just +# disambiguate targets within a single build; it also disambiguates +# different builds (such as builds for multiple ROMs all built from +# the same blib.a). +# +BUILD_ID_CMD = cat $^ | cksum | awk '{print $$1}' # Build timestamp # +# Used as a means to automatically select the newest version of iPXE +# if multiple iPXE drivers are loaded concurrently in a UEFI system. +# +# It gets rounded down to the nearest minute when used for this +# purpose. +# +ifdef SOURCE_DATE_EPOCH +BUILD_TIMESTAMP := $(SOURCE_DATE_EPOCH) +else ifdef GITVERSION +BUILD_TIMESTAMP := $(shell git log -1 --pretty=%ct) +else BUILD_TIMESTAMP := $(shell date +%s) +endif # Build version # @@ -1183,6 +1210,7 @@ $(BIN)/version.%.o : core/version.c $(MAKEDEPS) $(GIT_INDEX) -DVERSION_PATCH=$(VERSION_PATCH) \ -DVERSION="\"$(VERSION)\"" \ -c $< -o $@ + $(Q)$(OBJCOPY) --prefix-symbols=$(SYMBOL_PREFIX) $@ # Build an intermediate object file from the objects required for the # specified target. @@ -1190,7 +1218,7 @@ $(BIN)/version.%.o : core/version.c $(MAKEDEPS) $(GIT_INDEX) $(BIN)/%.tmp : $(BIN)/version.%.o $(BLIB) $(MAKEDEPS) $(LDSCRIPT) $(QM)$(ECHO) " [LD] $@" $(Q)$(LD) $(LDFLAGS) -T $(LDSCRIPT) $(TGT_LD_FLAGS) $< $(BLIB) -o $@ \ - --defsym _build_id=`$(BUILD_ID_CMD)` \ + --defsym _build_id=$(shell $(BUILD_ID_CMD)) \ --defsym _build_timestamp=$(BUILD_TIMESTAMP) \ -Map $(BIN)/$*.tmp.map $(Q)$(OBJDUMP) -ht $@ | $(PERL) $(SORTOBJDUMP) >> $(BIN)/$*.tmp.map @@ -1401,7 +1429,7 @@ $(ELF2EFI64) : util/elf2efi.c $(MAKEDEPS) $(Q)$(HOST_CC) $(HOST_CFLAGS) -idirafter include -DEFI_TARGET64 $< -o $@ CLEANUP += $(ELF2EFI64) -$(EFIROM) : util/efirom.c $(MAKEDEPS) +$(EFIROM) : util/efirom.c util/eficompress.c $(MAKEDEPS) $(QM)$(ECHO) " [HOSTCC] $@" $(Q)$(HOST_CC) $(HOST_CFLAGS) -idirafter include -o $@ $< CLEANUP += $(EFIROM) @@ -1411,15 +1439,6 @@ $(EFIFATBIN) : util/efifatbin.c $(MAKEDEPS) $(Q)$(HOST_CC) $(HOST_CFLAGS) -idirafter include -o $@ $< CLEANUP += $(EFIFATBIN) -############################################################################### -# -# The ICC fixup utility -# -$(ICCFIX) : util/iccfix.c $(MAKEDEPS) - $(QM)$(ECHO) " [HOSTCC] $@" - $(Q)$(HOST_CC) $(HOST_CFLAGS) -idirafter include -o $@ $< -CLEANUP += $(ICCFIX) - ############################################################################### # # The error usage information utility @@ -1559,13 +1578,14 @@ hci/keymap/keymap_%.c : # ifeq ($(NUM_BINS),0) -ALLBINS := bin{,-*} -CLEANUP := $(patsubst $(BIN)/%,$(ALLBINS)/%,$(CLEANUP)) -VERYCLEANUP := $(patsubst $(BIN)/%,$(ALLBINS)/%,$(VERYCLEANUP)) +ALLBINS := bin bin-* +ALLBIN = $(foreach B,$(ALLBINS),$(patsubst $(BIN)/%,$(B)/%,$(1))) +CLEANUP := $(foreach C,$(CLEANUP),$(call ALLBIN,$(C))) +VERYCLEANUP := $(foreach V,$(VERYCLEANUP),$(call ALLBIN,$(V))) endif clean : - $(RM) $(CLEANUP) + $(RM) -r $(CLEANUP) veryclean : clean $(RM) -r $(VERYCLEANUP) diff --git a/src/Makefile.linux b/src/Makefile.linux new file mode 100644 index 000000000..85d9c6438 --- /dev/null +++ b/src/Makefile.linux @@ -0,0 +1,51 @@ +# -*- makefile -*- : Force emacs to use Makefile mode + +# Prefix all iPXE symbols to avoid collisions with platform libraries +# +SYMBOL_PREFIX = _ipxe__ + +# Enable valgrind +# +CFLAGS += -UNVALGRIND + +# Use a two-stage link +# +LDFLAGS += -r -d + +# Source directories +# +SRCDIRS += drivers/linux +SRCDIRS += 
interface/linux +NON_AUTO_SRCS += interface/linux/linux_api.c + +# Media types +# +NON_AUTO_MEDIA = linux + +# Compiler flags for building host API wrapper +# +LINUX_CFLAGS += -Os -idirafter include -DSYMBOL_PREFIX=$(SYMBOL_PREFIX) + +# Check for libslirp +# +LIBSLIRP_TEST = $(CC) $(LINUX_CFLAGS) -x c /dev/null -nostartfiles \ + -include slirp/libslirp.h -lslirp \ + -o /dev/null >/dev/null 2>&1 +WITH_LIBSLIRP := $(shell $(LIBSLIRP_TEST) && $(ECHO) yes) +ifneq ($(WITH_LIBSLIRP),) +LINUX_CFLAGS += -DHAVE_LIBSLIRP +LINUX_LIBS += -lslirp +endif + +# Host API wrapper +# +$(BIN)/linux_api.o : interface/linux/linux_api.c include/ipxe/linux_api.h \ + include/ipxe/slirp.h $(MAKEDEPS) + $(QM)$(ECHO) " [BUILD] $@" + $(Q)$(CC) $(LINUX_CFLAGS) $(WORKAROUND_CFLAGS) -o $@ -c $< + +# Rule to generate final binary +# +$(BIN)/%.linux : $(BIN)/%.linux.tmp $(BIN)/linux_api.o + $(QM)$(ECHO) " [FINISH] $@" + $(Q)$(CC) $(LINUX_CFLAGS) $(WORKAROUND_CFLAGS) -o $@ $^ $(LINUX_LIBS) diff --git a/src/arch/i386/Makefile b/src/arch/i386/Makefile index b7c2792d9..e59f05fc8 100644 --- a/src/arch/i386/Makefile +++ b/src/arch/i386/Makefile @@ -69,22 +69,6 @@ CFLAGS += -fshort-wchar # CFLAGS += -Ui386 -# Some widespread patched versions of gcc include -fPIE -Wl,-pie by -# default. Note that gcc will exit *successfully* if it fails to -# recognise an option that starts with "no", so we have to test for -# output on stderr instead of checking the exit status. -# -# Current versions of gcc require -no-pie; older versions require -# -nopie. We therefore test for both. -# -ifeq ($(CCTYPE),gcc) -PIE_TEST = [ -z "`$(CC) -fno-PIE -no-pie -x c -c /dev/null -o /dev/null 2>&1`" ] -PIE_FLAGS := $(shell $(PIE_TEST) && $(ECHO) '-fno-PIE -no-pie') -PIE_TEST2 = [ -z "`$(CC) -fno-PIE -nopie -x c -c /dev/null -o /dev/null 2>&1`" ] -PIE_FLAGS2 := $(shell $(PIE_TEST2) && $(ECHO) '-fno-PIE -nopie') -WORKAROUND_CFLAGS += $(PIE_FLAGS) $(PIE_FLAGS2) -endif - # i386-specific directories containing source files # SRCDIRS += arch/i386/core diff --git a/src/arch/i386/Makefile.linux b/src/arch/i386/Makefile.linux index 46328c83b..fe4229e94 100644 --- a/src/arch/i386/Makefile.linux +++ b/src/arch/i386/Makefile.linux @@ -1,6 +1,14 @@ +# -*- makefile -*- : Force emacs to use Makefile mode + +# Linker script +# LDSCRIPT = arch/i386/scripts/linux.lds -SRCDIRS += arch/i386/core/linux +# Compiler flags for building host API wrapper +# +LINUX_CFLAGS += -m32 +# Include generic Linux Makefile +# MAKEDEPS += arch/x86/Makefile.linux include arch/x86/Makefile.linux diff --git a/src/arch/i386/core/linux/linux_syscall.S b/src/arch/i386/core/linux/linux_syscall.S deleted file mode 100644 index 38a3e74bd..000000000 --- a/src/arch/i386/core/linux/linux_syscall.S +++ /dev/null @@ -1,45 +0,0 @@ - - .section ".data" - .globl linux_errno - -linux_errno: .int 0 - - .section ".text" - .code32 - .globl linux_syscall - .type linux_syscall, @function - -linux_syscall: - /* Save registers */ - pushl %ebx - pushl %esi - pushl %edi - pushl %ebp - - movl 20(%esp), %eax // C arg1 -> syscall number - movl 24(%esp), %ebx // C arg2 -> syscall arg1 - movl 28(%esp), %ecx // C arg3 -> syscall arg2 - movl 32(%esp), %edx // C arg4 -> syscall arg3 - movl 36(%esp), %esi // C arg5 -> syscall arg4 - movl 40(%esp), %edi // C arg6 -> syscall arg5 - movl 44(%esp), %ebp // C arg7 -> syscall arg6 - - int $0x80 - - /* Restore registers */ - popl %ebp - popl %edi - popl %esi - popl %ebx - - cmpl $-4095, %eax - jae 1f - ret - -1: - negl %eax - movl %eax, linux_errno - movl $-1, %eax - ret - - .size 
linux_syscall, . - linux_syscall diff --git a/src/arch/i386/core/linux/linuxprefix.S b/src/arch/i386/core/linux/linuxprefix.S deleted file mode 100644 index 398d3cb21..000000000 --- a/src/arch/i386/core/linux/linuxprefix.S +++ /dev/null @@ -1,28 +0,0 @@ -#include - - .section ".text" - .code32 - .globl _linux_start - .type _linux_start, @function - -_linux_start: - xorl %ebp, %ebp - - popl %esi // save argc - movl %esp, %edi // save argv - - andl $~15, %esp // 16-byte align the stack - - pushl %edi // argv -> C arg2 - pushl %esi // argc -> C arg1 - - call save_args - - /* Our main doesn't use any arguments */ - call main - - movl %eax, %ebx // rc -> syscall arg1 - movl $__NR_exit, %eax - int $0x80 - - .size _linux_start, . - _linux_start diff --git a/src/arch/i386/include/bits/compiler.h b/src/arch/i386/include/bits/compiler.h index 7c4a09396..87201135f 100644 --- a/src/arch/i386/include/bits/compiler.h +++ b/src/arch/i386/include/bits/compiler.h @@ -9,7 +9,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #ifndef ASSEMBLY /** Declare a function with standard calling conventions */ -#define __asmcall __attribute__ (( used, cdecl, regparm(0) )) +#define __asmcall __attribute__ (( cdecl, regparm(0) )) /** * Declare a function with libgcc implicit linkage diff --git a/src/arch/i386/include/bits/linux_api.h b/src/arch/i386/include/bits/linux_api.h deleted file mode 100644 index dc6e7416e..000000000 --- a/src/arch/i386/include/bits/linux_api.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _I386_LINUX_API_H -#define _I386_LINUX_API_H - -#define __SYSCALL_mmap __NR_mmap2 - -#endif /* _I386_LINUX_API_H */ diff --git a/src/arch/x86/Makefile.linux b/src/arch/x86/Makefile.linux index 1faf84753..b60065567 100644 --- a/src/arch/x86/Makefile.linux +++ b/src/arch/x86/Makefile.linux @@ -1,13 +1,10 @@ -MEDIA = linux - -# enable valgrind -CFLAGS += -UNVALGRIND +# -*- makefile -*- : Force emacs to use Makefile mode +# Include x86 Linux headers +# INCDIRS += arch/x86/include/linux -SRCDIRS += interface/linux -SRCDIRS += drivers/linux -SRCDIRS += arch/x86/core/linux -$(BIN)/%.linux : $(BIN)/%.linux.tmp - $(QM)$(ECHO) " [FINISH] $@" - $(Q)$(CP) $< $@ +# Include generic Linux Makefile +# +MAKEDEPS += Makefile.linux +include Makefile.linux diff --git a/src/arch/x86/Makefile.pcbios b/src/arch/x86/Makefile.pcbios index c44eefc1f..ed8d554a7 100644 --- a/src/arch/x86/Makefile.pcbios +++ b/src/arch/x86/Makefile.pcbios @@ -4,18 +4,15 @@ # SRCDIRS += arch/x86/drivers/net -# The i386 linker script +# The linker scripts # LDSCRIPT = arch/x86/scripts/pcbios.lds +LDSCRIPT_PREFIX = arch/x86/scripts/prefixonly.lds # Stop ld from complaining about our customised linker script # LDFLAGS += -N --no-check-sections -# Prefix always starts at address zero -# -LDFLAGS += --section-start=.prefix=0 - # Media types. 
# MEDIA += rom @@ -57,46 +54,11 @@ LIST_NAME_mrom := ROMS LIST_NAME_pcirom := ROMS LIST_NAME_isarom := ROMS -# Locations of isolinux files -# -SYSLINUX_DIR_LIST := \ - /usr/lib/syslinux \ - /usr/lib/syslinux/bios \ - /usr/lib/syslinux/modules/bios \ - /usr/share/syslinux \ - /usr/share/syslinux/bios \ - /usr/share/syslinux/modules/bios \ - /usr/local/share/syslinux \ - /usr/local/share/syslinux/bios \ - /usr/local/share/syslinux/modules/bios \ - /usr/lib/ISOLINUX -ISOLINUX_BIN_LIST := \ - $(ISOLINUX_BIN) \ - $(patsubst %,%/isolinux.bin,$(SYSLINUX_DIR_LIST)) -LDLINUX_C32_LIST := \ - $(LDLINUX_C32) \ - $(patsubst %,%/ldlinux.c32,$(SYSLINUX_DIR_LIST)) -ISOLINUX_BIN = $(firstword $(wildcard $(ISOLINUX_BIN_LIST))) -LDLINUX_C32 = $(firstword $(wildcard $(LDLINUX_C32_LIST))) - -# rule to make a non-emulation ISO boot image +# ISO or FAT filesystem images NON_AUTO_MEDIA += iso -%iso: %lkrn util/geniso - $(QM)$(ECHO) " [GENISO] $@" - $(Q)ISOLINUX_BIN=$(ISOLINUX_BIN) LDLINUX_C32=$(LDLINUX_C32) \ - VERSION="$(VERSION)" bash util/geniso -o $@ $< - -# rule to make a floppy emulation ISO boot image -NON_AUTO_MEDIA += liso -%liso: %lkrn util/geniso - $(QM)$(ECHO) " [GENISO] $@" - $(Q)VERSION="$(VERSION)" bash util/geniso -l -o $@ $< - -# rule to make a syslinux floppy image (mountable, bootable) -NON_AUTO_MEDIA += sdsk -%sdsk: %lkrn util/gensdsk - $(QM)$(ECHO) " [GENSDSK] $@" - $(Q)bash util/gensdsk $@ $< +$(BIN)/%.iso $(BIN)/%.sdsk: $(BIN)/%.lkrn util/genfsimg + $(QM)$(ECHO) " [GENFSIMG] $@" + $(Q)util/genfsimg -o $@ $< # rule to write disk images to /dev/fd0 NON_AUTO_MEDIA += fd0 @@ -108,12 +70,12 @@ NON_AUTO_MEDIA += fd0 # Special target for building Master Boot Record binary $(BIN)/mbr.tmp : $(BIN)/mbr.o $(QM)$(ECHO) " [LD] $@" - $(Q)$(LD) $(LDFLAGS) -o $@ -e mbr $< + $(Q)$(LD) $(LDFLAGS) -T $(LDSCRIPT_PREFIX) -o $@ -e mbr $< # rule to make a USB disk image $(BIN)/usbdisk.tmp : $(BIN)/usbdisk.o $(QM)$(ECHO) " [LD] $@" - $(Q)$(LD) $(LDFLAGS) -o $@ -e mbr $< + $(Q)$(LD) $(LDFLAGS) -T $(LDSCRIPT_PREFIX) -o $@ -e mbr $< NON_AUTO_MEDIA += usb %usb: $(BIN)/usbdisk.bin %hd diff --git a/src/arch/x86/core/cachedhcp.c b/src/arch/x86/core/cachedhcp.c deleted file mode 100644 index dffafe3c9..000000000 --- a/src/arch/x86/core/cachedhcp.c +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Copyright (C) 2013 Michael Brown . - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - * - * You can also choose to distribute this program under the terms of - * the Unmodified Binary Distribution Licence (as given in the file - * COPYING.UBDL), provided that you have satisfied its requirements. - */ - -FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); - -#include -#include -#include -#include -#include -#include -#include - -/** @file - * - * Cached DHCP packet - * - */ - -/** Cached DHCPACK physical address - * - * This can be set by the prefix. 
- */ -uint32_t __bss16 ( cached_dhcpack_phys ); -#define cached_dhcpack_phys __use_data16 ( cached_dhcpack_phys ) - -/** Colour for debug messages */ -#define colour &cached_dhcpack_phys - -/** Cached DHCPACK */ -static struct dhcp_packet *cached_dhcpack; - -/** - * Cached DHCPACK startup function - * - */ -static void cachedhcp_init ( void ) { - struct dhcp_packet *dhcppkt; - struct dhcp_packet *tmp; - struct dhcphdr *dhcphdr; - size_t max_len; - size_t len; - - /* Do nothing if no cached DHCPACK is present */ - if ( ! cached_dhcpack_phys ) { - DBGC ( colour, "CACHEDHCP found no cached DHCPACK\n" ); - return; - } - - /* No reliable way to determine length before parsing packet; - * start by assuming maximum length permitted by PXE. - */ - max_len = sizeof ( BOOTPLAYER_t ); - - /* Allocate and populate DHCP packet */ - dhcppkt = zalloc ( sizeof ( *dhcppkt ) + max_len ); - if ( ! dhcppkt ) { - DBGC ( colour, "CACHEDHCP could not allocate copy\n" ); - return; - } - dhcphdr = ( ( ( void * ) dhcppkt ) + sizeof ( *dhcppkt ) ); - copy_from_user ( dhcphdr, phys_to_user ( cached_dhcpack_phys ), 0, - max_len ); - dhcppkt_init ( dhcppkt, dhcphdr, max_len ); - - /* Shrink packet to required length. If reallocation fails, - * just continue to use the original packet and waste the - * unused space. - */ - len = dhcppkt_len ( dhcppkt ); - assert ( len <= max_len ); - tmp = realloc ( dhcppkt, ( sizeof ( *dhcppkt ) + len ) ); - if ( tmp ) - dhcppkt = tmp; - - /* Reinitialise packet at new address */ - dhcphdr = ( ( ( void * ) dhcppkt ) + sizeof ( *dhcppkt ) ); - dhcppkt_init ( dhcppkt, dhcphdr, len ); - - /* Store as cached DHCPACK, and mark original copy as consumed */ - DBGC ( colour, "CACHEDHCP found cached DHCPACK at %08x+%zx\n", - cached_dhcpack_phys, len ); - cached_dhcpack = dhcppkt; - cached_dhcpack_phys = 0; -} - -/** - * Cached DHCPACK startup function - * - */ -static void cachedhcp_startup ( void ) { - - /* If cached DHCP packet was not claimed by any network device - * during startup, then free it. - */ - if ( cached_dhcpack ) { - DBGC ( colour, "CACHEDHCP freeing unclaimed cached DHCPACK\n" ); - dhcppkt_put ( cached_dhcpack ); - cached_dhcpack = NULL; - } -} - -/** Cached DHCPACK initialisation function */ -struct init_fn cachedhcp_init_fn __init_fn ( INIT_NORMAL ) = { - .initialise = cachedhcp_init, -}; - -/** Cached DHCPACK startup function */ -struct startup_fn cachedhcp_startup_fn __startup_fn ( STARTUP_LATE ) = { - .name = "cachedhcp", - .startup = cachedhcp_startup, -}; - -/** - * Apply cached DHCPACK to network device, if applicable - * - * @v netdev Network device - * @ret rc Return status code - */ -static int cachedhcp_probe ( struct net_device *netdev ) { - struct ll_protocol *ll_protocol = netdev->ll_protocol; - int rc; - - /* Do nothing unless we have a cached DHCPACK */ - if ( ! cached_dhcpack ) - return 0; - - /* Do nothing unless cached DHCPACK's MAC address matches this - * network device. 
- */ - if ( memcmp ( netdev->ll_addr, cached_dhcpack->dhcphdr->chaddr, - ll_protocol->ll_addr_len ) != 0 ) { - DBGC ( colour, "CACHEDHCP cached DHCPACK does not match %s\n", - netdev->name ); - return 0; - } - DBGC ( colour, "CACHEDHCP cached DHCPACK is for %s\n", netdev->name ); - - /* Register as DHCP settings for this network device */ - if ( ( rc = register_settings ( &cached_dhcpack->settings, - netdev_settings ( netdev ), - DHCP_SETTINGS_NAME ) ) != 0 ) { - DBGC ( colour, "CACHEDHCP could not register settings: %s\n", - strerror ( rc ) ); - return rc; - } - - /* Claim cached DHCPACK */ - dhcppkt_put ( cached_dhcpack ); - cached_dhcpack = NULL; - - return 0; -} - -/** Cached DHCP packet network device driver */ -struct net_driver cachedhcp_driver __net_driver = { - .name = "cachedhcp", - .probe = cachedhcp_probe, -}; diff --git a/src/arch/x86/core/linux/linux_api.c b/src/arch/x86/core/linux/linux_api.c deleted file mode 100644 index 17b1f3fd4..000000000 --- a/src/arch/x86/core/linux/linux_api.c +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright (C) 2010 Piotr JaroszyÅ„ski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - */ - -FILE_LICENCE ( GPL2_OR_LATER ); - -/** @file - * - * Implementation of most of the linux API. - */ - -#include - -#include -#include -#include - -int linux_open ( const char *pathname, int flags ) { - return linux_syscall ( __NR_open, pathname, flags ); -} - -int linux_close ( int fd ) { - return linux_syscall ( __NR_close, fd ); -} - -off_t linux_lseek ( int fd, off_t offset, int whence ) { - return linux_syscall ( __NR_lseek, fd, offset, whence ); -} - -__kernel_ssize_t linux_read ( int fd, void *buf, __kernel_size_t count ) { - return linux_syscall ( __NR_read, fd, buf, count ); -} - -__kernel_ssize_t linux_write ( int fd, const void *buf, - __kernel_size_t count ) { - return linux_syscall ( __NR_write, fd, buf, count ); -} - -int linux_fcntl ( int fd, int cmd, ... ) { - long arg; - va_list list; - - va_start ( list, cmd ); - arg = va_arg ( list, long ); - va_end ( list ); - - return linux_syscall ( __NR_fcntl, fd, cmd, arg ); -} - -int linux_ioctl ( int fd, int request, ... 
) { - void *arg; - va_list list; - - va_start ( list, request ); - arg = va_arg ( list, void * ); - va_end ( list ); - - return linux_syscall ( __NR_ioctl, fd, request, arg ); -} - -int linux_poll ( struct pollfd *fds, nfds_t nfds, int timeout ) { - return linux_syscall ( __NR_poll, fds, nfds, timeout ); -} - -int linux_nanosleep ( const struct timespec *req, struct timespec *rem ) { - return linux_syscall ( __NR_nanosleep, req, rem ); -} - -int linux_usleep ( useconds_t usec ) { - struct timespec ts = { - .tv_sec = ( ( long ) ( usec / 1000000 ) ), - .tv_nsec = ( ( long ) ( usec % 1000000 ) * 1000UL ), - }; - - return linux_nanosleep ( &ts, NULL ); -} - -int linux_gettimeofday ( struct timeval *tv, struct timezone *tz ) { - return linux_syscall ( __NR_gettimeofday, tv, tz ); -} - -void * linux_mmap ( void *addr, __kernel_size_t length, int prot, int flags, - int fd, __kernel_off_t offset ) { - return ( void * ) linux_syscall ( __SYSCALL_mmap, addr, length, prot, - flags, fd, offset ); -} - -void * linux_mremap ( void *old_address, __kernel_size_t old_size, - __kernel_size_t new_size, int flags ) { - return ( void * ) linux_syscall ( __NR_mremap, old_address, old_size, - new_size, flags ); -} - -int linux_munmap ( void *addr, __kernel_size_t length ) { - return linux_syscall ( __NR_munmap, addr, length ); -} - -int linux_socket ( int domain, int type_, int protocol ) { -#ifdef __NR_socket - return linux_syscall ( __NR_socket, domain, type_, protocol ); -#else -#ifndef SOCKOP_socket -# define SOCKOP_socket 1 -#endif - unsigned long sc_args[] = { domain, type_, protocol }; - return linux_syscall ( __NR_socketcall, SOCKOP_socket, sc_args ); -#endif -} - -int linux_bind ( int fd, const struct sockaddr *addr, socklen_t addrlen ) { -#ifdef __NR_bind - return linux_syscall ( __NR_bind, fd, addr, addrlen ); -#else -#ifndef SOCKOP_bind -# define SOCKOP_bind 2 -#endif - unsigned long sc_args[] = { fd, (unsigned long)addr, addrlen }; - return linux_syscall ( __NR_socketcall, SOCKOP_bind, sc_args ); -#endif -} - -ssize_t linux_sendto ( int fd, const void *buf, size_t len, int flags, - const struct sockaddr *daddr, socklen_t addrlen ) { -#ifdef __NR_sendto - return linux_syscall ( __NR_sendto, fd, buf, len, flags, - daddr, addrlen ); -#else -#ifndef SOCKOP_sendto -# define SOCKOP_sendto 11 -#endif - unsigned long sc_args[] = { fd, (unsigned long)buf, len, - flags, (unsigned long)daddr, addrlen }; - return linux_syscall ( __NR_socketcall, SOCKOP_sendto, sc_args ); -#endif -} diff --git a/src/arch/x86/core/linux/linux_strerror.c b/src/arch/x86/core/linux/linux_strerror.c deleted file mode 100644 index 24c9b7738..000000000 --- a/src/arch/x86/core/linux/linux_strerror.c +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright (C) 2010 Piotr JaroszyÅ„ski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- */ - -FILE_LICENCE(GPL2_OR_LATER); - -/** @file - * - * linux_strerror implementation - */ - -#include -#include - -/** Error names from glibc */ -static const char *errors[] = { - "Success", - "Operation not permitted", - "No such file or directory", - "No such process", - "Interrupted system call", - "Input/output error", - "No such device or address", - "Argument list too long", - "Exec format error", - "Bad file descriptor", - "No child processes", - "Resource temporarily unavailable", - "Cannot allocate memory", - "Permission denied", - "Bad address", - "Block device required", - "Device or resource busy", - "File exists", - "Invalid cross-device link", - "No such device", - "Not a directory", - "Is a directory", - "Invalid argument", - "Too many open files in system", - "Too many open files", - "Inappropriate ioctl for device", - "Text file busy", - "File too large", - "No space left on device", - "Illegal seek", - "Read-only file system", - "Too many links", - "Broken pipe", - "Numerical argument out of domain", - "Numerical result out of range", - "Resource deadlock avoided", - "File name too long", - "No locks available", - "Function not implemented", - "Directory not empty", - "Too many levels of symbolic links", - "", - "No message of desired type", - "Identifier removed", - "Channel number out of range", - "Level 2 not synchronized", - "Level 3 halted", - "Level 3 reset", - "Link number out of range", - "Protocol driver not attached", - "No CSI structure available", - "Level 2 halted", - "Invalid exchange", - "Invalid request descriptor", - "Exchange full", - "No anode", - "Invalid request code", - "Invalid slot", - "", - "Bad font file format", - "Device not a stream", - "No data available", - "Timer expired", - "Out of streams resources", - "Machine is not on the network", - "Package not installed", - "Object is remote", - "Link has been severed", - "Advertise error", - "Srmount error", - "Communication error on send", - "Protocol error", - "Multihop attempted", - "RFS specific error", - "Bad message", - "Value too large for defined data type", - "Name not unique on network", - "File descriptor in bad state", - "Remote address changed", - "Can not access a needed shared library", - "Accessing a corrupted shared library", - ".lib section in a.out corrupted", - "Attempting to link in too many shared libraries", - "Cannot exec a shared library directly", - "Invalid or incomplete multibyte or wide character", - "Interrupted system call should be restarted", - "Streams pipe error", - "Too many users", - "Socket operation on non-socket", - "Destination address required", - "Message too long", - "Protocol wrong type for socket", - "Protocol not available", - "Protocol not supported", - "Socket type not supported", - "Operation not supported", - "Protocol family not supported", - "Address family not supported by protocol", - "Address already in use", - "Cannot assign requested address", - "Network is down", - "Network is unreachable", - "Network dropped connection on reset", - "Software caused connection abort", - "Connection reset by peer", - "No buffer space available", - "Transport endpoint is already connected", - "Transport endpoint is not connected", - "Cannot send after transport endpoint shutdown", - "Too many references: cannot splice", - "Connection timed out", - "Connection refused", - "Host is down", - "No route to host", - "Operation already in progress", - "Operation now in progress", - "Stale NFS file handle", - "Structure needs cleaning", - "Not a XENIX named type 
file", - "No XENIX semaphores available", - "Is a named type file", - "Remote I/O error", - "Disk quota exceeded", - "No medium found", - "Wrong medium type", -}; - -const char *linux_strerror(int errnum) -{ - static char errbuf[64]; - static int errors_size = sizeof(errors) / sizeof(*errors); - - if (errnum >= errors_size || errnum < 0) { - snprintf(errbuf, sizeof(errbuf), "Error %#08x", errnum); - return errbuf; - } else { - return errors[errnum]; - } -} diff --git a/src/arch/x86/core/pcidirect.c b/src/arch/x86/core/pcidirect.c index 0d09be84b..9b8226fea 100644 --- a/src/arch/x86/core/pcidirect.c +++ b/src/arch/x86/core/pcidirect.c @@ -52,3 +52,4 @@ PROVIDE_PCIAPI_INLINE ( direct, pci_read_config_dword ); PROVIDE_PCIAPI_INLINE ( direct, pci_write_config_byte ); PROVIDE_PCIAPI_INLINE ( direct, pci_write_config_word ); PROVIDE_PCIAPI_INLINE ( direct, pci_write_config_dword ); +PROVIDE_PCIAPI_INLINE ( direct, pci_ioremap ); diff --git a/src/arch/x86/core/runtime.c b/src/arch/x86/core/runtime.c index f96b23af4..02072b5bf 100644 --- a/src/arch/x86/core/runtime.c +++ b/src/arch/x86/core/runtime.c @@ -38,7 +38,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include #include /** Command line physical address @@ -180,7 +179,6 @@ static int cmdline_init ( void ) { */ static int initrd_init ( void ) { struct image *image; - int rc; /* Do nothing if no initrd was specified */ if ( ! initrd_phys ) { @@ -194,53 +192,18 @@ static int initrd_init ( void ) { DBGC ( colour, "RUNTIME found initrd at [%x,%x)\n", initrd_phys, ( initrd_phys + initrd_len ) ); - /* Allocate image */ - image = alloc_image ( NULL ); + /* Create initrd image */ + image = image_memory ( "", phys_to_user ( initrd_phys ), + initrd_len ); if ( ! image ) { - DBGC ( colour, "RUNTIME could not allocate image for " - "initrd\n" ); - rc = -ENOMEM; - goto err_alloc_image; + DBGC ( colour, "RUNTIME could not create initrd image\n" ); + return -ENOMEM; } - if ( ( rc = image_set_name ( image, "" ) ) != 0 ) { - DBGC ( colour, "RUNTIME could not set image name: %s\n", - strerror ( rc ) ); - goto err_set_name; - } - - /* Allocate and copy initrd content */ - image->data = umalloc ( initrd_len ); - if ( ! 
image->data ) { - DBGC ( colour, "RUNTIME could not allocate %d bytes for " - "initrd\n", initrd_len ); - rc = -ENOMEM; - goto err_umalloc; - } - image->len = initrd_len; - memcpy_user ( image->data, 0, phys_to_user ( initrd_phys ), 0, - initrd_len ); /* Mark initrd as consumed */ initrd_phys = 0; - /* Register image */ - if ( ( rc = register_image ( image ) ) != 0 ) { - DBGC ( colour, "RUNTIME could not register initrd: %s\n", - strerror ( rc ) ); - goto err_register_image; - } - - /* Drop our reference to the image */ - image_put ( image ); - return 0; - - err_register_image: - err_umalloc: - err_set_name: - image_put ( image ); - err_alloc_image: - return rc; } /** diff --git a/src/arch/x86/core/stack.S b/src/arch/x86/core/stack.S index 995c397ca..baa19ff84 100644 --- a/src/arch/x86/core/stack.S +++ b/src/arch/x86/core/stack.S @@ -13,7 +13,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) **************************************************************************** */ .section ".stack", "aw", @nobits - .align 8 + .balign 8 .globl _stack _stack: .space STACK_SIZE diff --git a/src/arch/x86/core/stack16.S b/src/arch/x86/core/stack16.S index 4bc6f081a..ad67e4f2d 100644 --- a/src/arch/x86/core/stack16.S +++ b/src/arch/x86/core/stack16.S @@ -7,7 +7,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) **************************************************************************** */ .section ".stack16", "aw", @nobits - .align 8 + .balign 8 .globl _stack16 _stack16: .space 4096 diff --git a/src/arch/x86/core/x86_bigint.c b/src/arch/x86/core/x86_bigint.c index 6413b2fa8..9a25bdad5 100644 --- a/src/arch/x86/core/x86_bigint.c +++ b/src/arch/x86/core/x86_bigint.c @@ -75,17 +75,18 @@ void bigint_multiply_raw ( const uint32_t *multiplicand0, * * a < 2^{n}, b < 2^{n} => ab < 2^{2n} */ - __asm__ __volatile__ ( "mull %4\n\t" - "addl %%eax, (%5,%2,4)\n\t" - "adcl %%edx, 4(%5,%2,4)\n\t" + __asm__ __volatile__ ( "mull %5\n\t" + "addl %%eax, (%6,%2,4)\n\t" + "adcl %%edx, 4(%6,%2,4)\n\t" "\n1:\n\t" - "adcl $0, 8(%5,%2,4)\n\t" + "adcl $0, 8(%6,%2,4)\n\t" "inc %2\n\t" /* Does not affect CF */ "jc 1b\n\t" : "=&a" ( discard_a ), "=&d" ( discard_d ), - "=&r" ( index ) + "=&r" ( index ), + "+m" ( *result ) : "0" ( multiplicand_element ), "g" ( multiplier_element ), "r" ( result_elements ), diff --git a/src/arch/x86/core/x86_string.c b/src/arch/x86/core/x86_string.c index 7d5e4a5f1..1a1e79dac 100644 --- a/src/arch/x86/core/x86_string.c +++ b/src/arch/x86/core/x86_string.c @@ -30,6 +30,14 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include +#include + +/* Use generic_memcpy_reverse() if we cannot safely set the direction flag */ +#ifdef UNSAFE_STD +#define USE_GENERIC_MEMCPY_REVERSE 1 +#else +#define USE_GENERIC_MEMCPY_REVERSE 0 +#endif /** * Copy memory area @@ -77,6 +85,12 @@ void * __attribute__ (( noinline )) __memcpy_reverse ( void *dest, const void *esi = ( src + len - 1 ); int discard_ecx; + /* Use unoptimised version if we are not permitted to modify + * the direction flag. + */ + if ( USE_GENERIC_MEMCPY_REVERSE ) + return generic_memcpy_reverse ( dest, src, len ); + /* Assume memmove() is not performance-critical, and perform a * bytewise copy for simplicity. */ diff --git a/src/arch/x86/drivers/hyperv/hyperv.c b/src/arch/x86/drivers/hyperv/hyperv.c index 1903d1db2..9d3a42da0 100644 --- a/src/arch/x86/drivers/hyperv/hyperv.c +++ b/src/arch/x86/drivers/hyperv/hyperv.c @@ -83,7 +83,7 @@ hv_alloc_pages ( struct hv_hypervisor *hv, ... 
) { /* Allocate and zero pages */ va_start ( args, hv ); for ( i = 0 ; ( ( page = va_arg ( args, void ** ) ) != NULL ); i++ ) { - *page = malloc_dma ( PAGE_SIZE, PAGE_SIZE ); + *page = malloc_phys ( PAGE_SIZE, PAGE_SIZE ); if ( ! *page ) goto err_alloc; memset ( *page, 0, PAGE_SIZE ); @@ -97,7 +97,7 @@ hv_alloc_pages ( struct hv_hypervisor *hv, ... ) { va_start ( args, hv ); for ( ; i >= 0 ; i-- ) { page = va_arg ( args, void ** ); - free_dma ( *page, PAGE_SIZE ); + free_phys ( *page, PAGE_SIZE ); } va_end ( args ); return -ENOMEM; @@ -116,7 +116,7 @@ hv_free_pages ( struct hv_hypervisor *hv, ... ) { va_start ( args, hv ); while ( ( page = va_arg ( args, void * ) ) != NULL ) - free_dma ( page, PAGE_SIZE ); + free_phys ( page, PAGE_SIZE ); va_end ( args ); } @@ -131,8 +131,8 @@ static int hv_alloc_message ( struct hv_hypervisor *hv ) { /* Allocate buffer. Must be aligned to at least 8 bytes and * must not cross a page boundary, so align on its own size. */ - hv->message = malloc_dma ( sizeof ( *hv->message ), - sizeof ( *hv->message ) ); + hv->message = malloc_phys ( sizeof ( *hv->message ), + sizeof ( *hv->message ) ); if ( ! hv->message ) return -ENOMEM; @@ -147,7 +147,7 @@ static int hv_alloc_message ( struct hv_hypervisor *hv ) { static void hv_free_message ( struct hv_hypervisor *hv ) { /* Free buffer */ - free_dma ( hv->message, sizeof ( *hv->message ) ); + free_phys ( hv->message, sizeof ( *hv->message ) ); } /** diff --git a/src/arch/x86/drivers/net/undinet.c b/src/arch/x86/drivers/net/undinet.c index 9b7d6d849..43cb18bfe 100644 --- a/src/arch/x86/drivers/net/undinet.c +++ b/src/arch/x86/drivers/net/undinet.c @@ -104,6 +104,13 @@ static union u_PXENV_ANY __bss16 ( undinet_params ); SEGOFF16_t __bss16 ( undinet_entry_point ); #define undinet_entry_point __use_data16 ( undinet_entry_point ) +/* Read TSC in real mode only when profiling */ +#if PROFILING +#define RDTSC_IF_PROFILING "rdtsc\n\t" +#else +#define RDTSC_IF_PROFILING "" +#endif + /** IRQ profiler */ static struct profiler undinet_irq_profiler __profiler = { .name = "undinet.irq" }; @@ -288,14 +295,14 @@ static int undinet_call ( struct undi_nic *undinic, unsigned int function, */ profile_start ( &profiler->total ); __asm__ __volatile__ ( REAL_CODE ( "pushl %%ebp\n\t" /* gcc bug */ - "rdtsc\n\t" + RDTSC_IF_PROFILING "pushl %%eax\n\t" "pushw %%es\n\t" "pushw %%di\n\t" "pushw %%bx\n\t" "lcall *undinet_entry_point\n\t" "movw %%ax, %%bx\n\t" - "rdtsc\n\t" + RDTSC_IF_PROFILING "addw $6, %%sp\n\t" "popl %%edx\n\t" "popl %%ebp\n\t" /* gcc bug */ ) diff --git a/src/arch/x86/drivers/xen/hvm.c b/src/arch/x86/drivers/xen/hvm.c index 57196f555..b77cdd14c 100644 --- a/src/arch/x86/drivers/xen/hvm.c +++ b/src/arch/x86/drivers/xen/hvm.c @@ -106,7 +106,7 @@ static int hvm_map_hypercall ( struct hvm_device *hvm ) { /* Allocate pages */ hvm->hypercall_len = ( pages * PAGE_SIZE ); - hvm->xen.hypercall = malloc_dma ( hvm->hypercall_len, PAGE_SIZE ); + hvm->xen.hypercall = malloc_phys ( hvm->hypercall_len, PAGE_SIZE ); if ( ! 
hvm->xen.hypercall ) { DBGC ( hvm, "HVM could not allocate %d hypercall page(s)\n", pages ); @@ -141,7 +141,7 @@ static int hvm_map_hypercall ( struct hvm_device *hvm ) { static void hvm_unmap_hypercall ( struct hvm_device *hvm ) { /* Free pages */ - free_dma ( hvm->xen.hypercall, hvm->hypercall_len ); + free_phys ( hvm->xen.hypercall, hvm->hypercall_len ); } /** @@ -175,7 +175,7 @@ static void * hvm_ioremap ( struct hvm_device *hvm, unsigned int space, } /* Map this space */ - mmio = ioremap ( ( hvm->mmio + hvm->mmio_offset ), len ); + mmio = pci_ioremap ( hvm->pci, ( hvm->mmio + hvm->mmio_offset ), len ); if ( ! mmio ) { DBGC ( hvm, "HVM could not map MMIO space [%08lx,%08lx)\n", ( hvm->mmio + hvm->mmio_offset ), @@ -371,7 +371,8 @@ static int hvm_map_xenstore ( struct hvm_device *hvm ) { xenstore_phys = ( xenstore_pfn * PAGE_SIZE ); /* Map XenStore */ - hvm->xen.store.intf = ioremap ( xenstore_phys, PAGE_SIZE ); + hvm->xen.store.intf = pci_ioremap ( hvm->pci, xenstore_phys, + PAGE_SIZE ); if ( ! hvm->xen.store.intf ) { DBGC ( hvm, "HVM could not map XenStore at [%08lx,%08lx)\n", xenstore_phys, ( xenstore_phys + PAGE_SIZE ) ); @@ -420,6 +421,7 @@ static int hvm_probe ( struct pci_device *pci ) { rc = -ENOMEM; goto err_alloc; } + hvm->pci = pci; hvm->mmio = pci_bar_start ( pci, HVM_MMIO_BAR ); hvm->mmio_len = pci_bar_size ( pci, HVM_MMIO_BAR ); DBGC2 ( hvm, "HVM has MMIO space [%08lx,%08lx)\n", diff --git a/src/arch/x86/drivers/xen/hvm.h b/src/arch/x86/drivers/xen/hvm.h index 72ed94f6d..88e490815 100644 --- a/src/arch/x86/drivers/xen/hvm.h +++ b/src/arch/x86/drivers/xen/hvm.h @@ -39,6 +39,8 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); struct hvm_device { /** Xen hypervisor */ struct xen_hypervisor xen; + /** PCI device */ + struct pci_device *pci; /** CPUID base */ uint32_t cpuid_base; /** Length of hypercall table */ diff --git a/src/arch/x86/image/bzimage.c b/src/arch/x86/image/bzimage.c index 51498bf95..a782127a1 100644 --- a/src/arch/x86/image/bzimage.c +++ b/src/arch/x86/image/bzimage.c @@ -326,32 +326,6 @@ static void bzimage_set_cmdline ( struct image *image, DBGC ( image, "bzImage %p command line \"%s\"\n", image, cmdline ); } -/** - * Parse standalone image command line for cpio parameters - * - * @v image bzImage file - * @v cpio CPIO header - * @v cmdline Command line - */ -static void bzimage_parse_cpio_cmdline ( struct image *image, - struct cpio_header *cpio, - const char *cmdline ) { - char *arg; - char *end; - unsigned int mode; - - /* Look for "mode=" */ - if ( ( arg = strstr ( cmdline, "mode=" ) ) ) { - arg += 5; - mode = strtoul ( arg, &end, 8 /* Octal for file mode */ ); - if ( *end && ( *end != ' ' ) ) { - DBGC ( image, "bzImage %p strange \"mode=\"" - "terminator '%c'\n", image, *end ); - } - cpio_set_field ( cpio->c_mode, ( 0100000 | mode ) ); - } -} - /** * Align initrd length * @@ -374,11 +348,9 @@ static inline size_t bzimage_align ( size_t len ) { static size_t bzimage_load_initrd ( struct image *image, struct image *initrd, userptr_t address ) { - char *filename = initrd->cmdline; - char *cmdline; + const char *filename = cpio_name ( initrd ); struct cpio_header cpio; size_t offset; - size_t name_len; size_t pad_len; /* Do not include kernel image itself as an initrd */ @@ -386,25 +358,7 @@ static size_t bzimage_load_initrd ( struct image *image, return 0; /* Create cpio header for non-prebuilt images */ - if ( filename && filename[0] ) { - cmdline = strchr ( filename, ' ' ); - name_len = ( ( cmdline ? 
( ( size_t ) ( cmdline - filename ) ) - : strlen ( filename ) ) + 1 /* NUL */ ); - memset ( &cpio, '0', sizeof ( cpio ) ); - memcpy ( cpio.c_magic, CPIO_MAGIC, sizeof ( cpio.c_magic ) ); - cpio_set_field ( cpio.c_mode, 0100644 ); - cpio_set_field ( cpio.c_nlink, 1 ); - cpio_set_field ( cpio.c_filesize, initrd->len ); - cpio_set_field ( cpio.c_namesize, name_len ); - if ( cmdline ) { - bzimage_parse_cpio_cmdline ( image, &cpio, - ( cmdline + 1 /* ' ' */ )); - } - offset = ( ( sizeof ( cpio ) + name_len + 0x03 ) & ~0x03 ); - } else { - offset = 0; - name_len = 0; - } + offset = cpio_header ( initrd, &cpio ); /* Copy in initrd image body (and cpio header if applicable) */ if ( address ) { @@ -413,7 +367,7 @@ static size_t bzimage_load_initrd ( struct image *image, memset_user ( address, 0, 0, offset ); copy_to_user ( address, 0, &cpio, sizeof ( cpio ) ); copy_to_user ( address, sizeof ( cpio ), filename, - ( name_len - 1 /* NUL (or space) */ ) ); + cpio_name_len ( initrd ) ); } DBGC ( image, "bzImage %p initrd %p [%#08lx,%#08lx,%#08lx)" "%s%s\n", image, initrd, user_to_phys ( address, 0 ), diff --git a/src/arch/x86/image/com32.c b/src/arch/x86/image/com32.c index 016652877..6f0e66041 100644 --- a/src/arch/x86/image/com32.c +++ b/src/arch/x86/image/com32.c @@ -110,7 +110,7 @@ static int com32_exec_loop ( struct image *image ) { /* Disable interrupts */ "cli\n\t" /* Restore stack pointer */ - "movl 24(%%esp), %%esp\n\t" + "movl 28(%%esp), %%esp\n\t" /* Restore registers */ "popal\n\t" ) : diff --git a/src/arch/x86/image/initrd.c b/src/arch/x86/image/initrd.c index 8f6366d3d..d7b1f5773 100644 --- a/src/arch/x86/image/initrd.c +++ b/src/arch/x86/image/initrd.c @@ -29,6 +29,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include /** @file * @@ -175,18 +176,18 @@ static int initrd_swap_any ( userptr_t free, size_t free_len ) { /* Search for adjacent image */ for_each_image ( high ) { - /* If we have found the adjacent image, swap and exit */ - if ( high->data == adjacent ) { - initrd_swap ( low, high, free, free_len ); - return 1; - } - /* Stop search if all remaining potential * adjacent images are already in the correct * order. 
*/ if ( high == low ) break; + + /* If we have found the adjacent image, swap and exit */ + if ( high->data == adjacent ) { + initrd_swap ( low, high, free, free_len ); + return 1; + } } } diff --git a/src/arch/x86/include/bits/bigint.h b/src/arch/x86/include/bits/bigint.h index c9bb6ea45..7443d6fdc 100644 --- a/src/arch/x86/include/bits/bigint.h +++ b/src/arch/x86/include/bits/bigint.h @@ -25,19 +25,22 @@ typedef uint32_t bigint_element_t; static inline __attribute__ (( always_inline )) void bigint_init_raw ( uint32_t *value0, unsigned int size, const void *data, size_t len ) { - long pad_len = ( sizeof ( bigint_t ( size ) ) - len ); + bigint_t ( size ) __attribute__ (( may_alias )) *value = + ( ( void * ) value0 ); + long pad_len = ( sizeof ( *value ) - len ); void *discard_D; long discard_c; /* Copy raw data in reverse order, padding with zeros */ __asm__ __volatile__ ( "\n1:\n\t" - "movb -1(%2,%1), %%al\n\t" + "movb -1(%3,%1), %%al\n\t" "stosb\n\t" "loop 1b\n\t" "xorl %%eax, %%eax\n\t" - "mov %3, %1\n\t" + "mov %4, %1\n\t" "rep stosb\n\t" - : "=&D" ( discard_D ), "=&c" ( discard_c ) + : "=&D" ( discard_D ), "=&c" ( discard_c ), + "+m" ( *value ) : "r" ( data ), "g" ( pad_len ), "0" ( value0 ), "1" ( len ) : "eax" ); @@ -53,6 +56,8 @@ bigint_init_raw ( uint32_t *value0, unsigned int size, static inline __attribute__ (( always_inline )) void bigint_add_raw ( const uint32_t *addend0, uint32_t *value0, unsigned int size ) { + bigint_t ( size ) __attribute__ (( may_alias )) *value = + ( ( void * ) value0 ); long index; void *discard_S; long discard_c; @@ -60,11 +65,11 @@ bigint_add_raw ( const uint32_t *addend0, uint32_t *value0, __asm__ __volatile__ ( "xor %0, %0\n\t" /* Zero %0 and clear CF */ "\n1:\n\t" "lodsl\n\t" - "adcl %%eax, (%3,%0,4)\n\t" + "adcl %%eax, (%4,%0,4)\n\t" "inc %0\n\t" /* Does not affect CF */ "loop 1b\n\t" : "=&r" ( index ), "=&S" ( discard_S ), - "=&c" ( discard_c ) + "=&c" ( discard_c ), "+m" ( *value ) : "r" ( value0 ), "1" ( addend0 ), "2" ( size ) : "eax" ); } @@ -79,6 +84,8 @@ bigint_add_raw ( const uint32_t *addend0, uint32_t *value0, static inline __attribute__ (( always_inline )) void bigint_subtract_raw ( const uint32_t *subtrahend0, uint32_t *value0, unsigned int size ) { + bigint_t ( size ) __attribute__ (( may_alias )) *value = + ( ( void * ) value0 ); long index; void *discard_S; long discard_c; @@ -86,11 +93,11 @@ bigint_subtract_raw ( const uint32_t *subtrahend0, uint32_t *value0, __asm__ __volatile__ ( "xor %0, %0\n\t" /* Zero %0 and clear CF */ "\n1:\n\t" "lodsl\n\t" - "sbbl %%eax, (%3,%0,4)\n\t" + "sbbl %%eax, (%4,%0,4)\n\t" "inc %0\n\t" /* Does not affect CF */ "loop 1b\n\t" : "=&r" ( index ), "=&S" ( discard_S ), - "=&c" ( discard_c ) + "=&c" ( discard_c ), "+m" ( *value ) : "r" ( value0 ), "1" ( subtrahend0 ), "2" ( size ) : "eax" ); @@ -104,15 +111,18 @@ bigint_subtract_raw ( const uint32_t *subtrahend0, uint32_t *value0, */ static inline __attribute__ (( always_inline )) void bigint_rol_raw ( uint32_t *value0, unsigned int size ) { + bigint_t ( size ) __attribute__ (( may_alias )) *value = + ( ( void * ) value0 ); long index; long discard_c; __asm__ __volatile__ ( "xor %0, %0\n\t" /* Zero %0 and clear CF */ "\n1:\n\t" - "rcll $1, (%2,%0,4)\n\t" + "rcll $1, (%3,%0,4)\n\t" "inc %0\n\t" /* Does not affect CF */ "loop 1b\n\t" - : "=&r" ( index ), "=&c" ( discard_c ) + : "=&r" ( index ), "=&c" ( discard_c ), + "+m" ( *value ) : "r" ( value0 ), "1" ( size ) ); } @@ -124,13 +134,15 @@ bigint_rol_raw ( uint32_t *value0, unsigned int size ) { */ static 
inline __attribute__ (( always_inline )) void bigint_ror_raw ( uint32_t *value0, unsigned int size ) { + bigint_t ( size ) __attribute__ (( may_alias )) *value = + ( ( void * ) value0 ); long discard_c; __asm__ __volatile__ ( "clc\n\t" "\n1:\n\t" - "rcrl $1, -4(%1,%0,4)\n\t" + "rcrl $1, -4(%2,%0,4)\n\t" "loop 1b\n\t" - : "=&c" ( discard_c ) + : "=&c" ( discard_c ), "+m" ( *value ) : "r" ( value0 ), "0" ( size ) ); } @@ -167,28 +179,19 @@ bigint_is_zero_raw ( const uint32_t *value0, unsigned int size ) { static inline __attribute__ (( always_inline, pure )) int bigint_is_geq_raw ( const uint32_t *value0, const uint32_t *reference0, unsigned int size ) { - const bigint_t ( size ) __attribute__ (( may_alias )) *value = - ( ( const void * ) value0 ); - const bigint_t ( size ) __attribute__ (( may_alias )) *reference = - ( ( const void * ) reference0 ); - void *discard_S; - void *discard_D; long discard_c; + long discard_tmp; int result; - __asm__ __volatile__ ( "std\n\t" - "\n1:\n\t" - "lodsl\n\t" - "scasl\n\t" + __asm__ __volatile__ ( "\n1:\n\t" + "movl -4(%3, %1, 4), %k2\n\t" + "cmpl -4(%4, %1, 4), %k2\n\t" "loope 1b\n\t" "setae %b0\n\t" - "cld\n\t" - : "=q" ( result ), "=&S" ( discard_S ), - "=&D" ( discard_D ), "=&c" ( discard_c ) - : "0" ( 0 ), "1" ( &value->element[ size - 1 ] ), - "2" ( &reference->element[ size - 1 ] ), - "3" ( size ) - : "eax" ); + : "=q" ( result ), "=&c" ( discard_c ), + "=&r" ( discard_tmp ) + : "r" ( value0 ), "r" ( reference0 ), + "0" ( 0 ), "1" ( size ) ); return result; } @@ -248,6 +251,8 @@ bigint_max_set_bit_raw ( const uint32_t *value0, unsigned int size ) { static inline __attribute__ (( always_inline )) void bigint_grow_raw ( const uint32_t *source0, unsigned int source_size, uint32_t *dest0, unsigned int dest_size ) { + bigint_t ( dest_size ) __attribute__ (( may_alias )) *dest = + ( ( void * ) dest0 ); long pad_size = ( dest_size - source_size ); void *discard_D; void *discard_S; @@ -255,10 +260,10 @@ bigint_grow_raw ( const uint32_t *source0, unsigned int source_size, __asm__ __volatile__ ( "rep movsl\n\t" "xorl %%eax, %%eax\n\t" - "mov %3, %2\n\t" + "mov %4, %2\n\t" "rep stosl\n\t" : "=&D" ( discard_D ), "=&S" ( discard_S ), - "=&c" ( discard_c ) + "=&c" ( discard_c ), "+m" ( *dest ) : "g" ( pad_size ), "0" ( dest0 ), "1" ( source0 ), "2" ( source_size ) : "eax" ); @@ -275,13 +280,15 @@ bigint_grow_raw ( const uint32_t *source0, unsigned int source_size, static inline __attribute__ (( always_inline )) void bigint_shrink_raw ( const uint32_t *source0, unsigned int source_size __unused, uint32_t *dest0, unsigned int dest_size ) { + bigint_t ( dest_size ) __attribute__ (( may_alias )) *dest = + ( ( void * ) dest0 ); void *discard_D; void *discard_S; long discard_c; __asm__ __volatile__ ( "rep movsl\n\t" : "=&D" ( discard_D ), "=&S" ( discard_S ), - "=&c" ( discard_c ) + "=&c" ( discard_c ), "+m" ( *dest ) : "0" ( dest0 ), "1" ( source0 ), "2" ( dest_size ) : "eax" ); @@ -298,15 +305,19 @@ bigint_shrink_raw ( const uint32_t *source0, unsigned int source_size __unused, static inline __attribute__ (( always_inline )) void bigint_done_raw ( const uint32_t *value0, unsigned int size __unused, void *out, size_t len ) { + struct { + uint8_t bytes[len]; + } __attribute__ (( may_alias )) *out_bytes = out; void *discard_D; long discard_c; /* Copy raw data in reverse order */ __asm__ __volatile__ ( "\n1:\n\t" - "movb -1(%2,%1), %%al\n\t" + "movb -1(%3,%1), %%al\n\t" "stosb\n\t" "loop 1b\n\t" - : "=&D" ( discard_D ), "=&c" ( discard_c ) + : "=&D" ( discard_D ), "=&c" 
( discard_c ), + "+m" ( *out_bytes ) : "r" ( value0 ), "0" ( out ), "1" ( len ) : "eax" ); } diff --git a/src/arch/x86/include/bits/bitops.h b/src/arch/x86/include/bits/bitops.h index 17dcf1024..f697b8c8f 100644 --- a/src/arch/x86/include/bits/bitops.h +++ b/src/arch/x86/include/bits/bitops.h @@ -29,7 +29,7 @@ set_bit ( unsigned int bit, volatile void *bits ) { uint8_t byte[ ( bit / 8 ) + 1 ]; } *bytes = bits; - __asm__ __volatile__ ( "lock bts %1, %0" + __asm__ __volatile__ ( "lock btsl %k1, %0" : "+m" ( *bytes ) : "Ir" ( bit ) ); } @@ -45,7 +45,7 @@ clear_bit ( unsigned int bit, volatile void *bits ) { uint8_t byte[ ( bit / 8 ) + 1 ]; } *bytes = bits; - __asm__ __volatile__ ( "lock btr %1, %0" + __asm__ __volatile__ ( "lock btrl %k1, %0" : "+m" ( *bytes ) : "Ir" ( bit ) ); } @@ -63,7 +63,7 @@ test_and_set_bit ( unsigned int bit, volatile void *bits ) { } *bytes = bits; int old; - __asm__ __volatile__ ( "lock bts %2, %0\n\t" + __asm__ __volatile__ ( "lock btsl %k2, %0\n\t" "sbb %1, %1\n\t" : "+m" ( *bytes ), "=r" ( old ) : "Ir" ( bit ) ); @@ -84,7 +84,7 @@ test_and_clear_bit ( unsigned int bit, volatile void *bits ) { } *bytes = bits; int old; - __asm__ __volatile__ ( "lock btr %2, %0\n\t" + __asm__ __volatile__ ( "lock btrl %k2, %0\n\t" "sbb %1, %1\n\t" : "+m" ( *bytes ), "=r" ( old ) : "Ir" ( bit ) ); diff --git a/src/arch/x86/include/bits/linux_api_platform.h b/src/arch/x86/include/bits/linux_api_platform.h deleted file mode 100644 index 4a9ced5e2..000000000 --- a/src/arch/x86/include/bits/linux_api_platform.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _LINUX_API_PLATFORM_H -#define _LINUX_API_PLATFORM_H - -extern int linux_errno; - -#endif /* _LINUX_API_PLATFORM_H */ diff --git a/src/arch/x86/include/initrd.h b/src/arch/x86/include/initrd.h index ddb3e5a45..2fb9d3d3a 100644 --- a/src/arch/x86/include/initrd.h +++ b/src/arch/x86/include/initrd.h @@ -11,13 +11,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include -/** Minimum alignment for initrds - * - * Some versions of Linux complain about initrds that are not - * page-aligned. 
- */ -#define INITRD_ALIGN 4096 - /** Minimum free space required to reshuffle initrds * * Chosen to avoid absurdly long reshuffling times diff --git a/src/arch/x86/include/ipxe/cpuid.h b/src/arch/x86/include/ipxe/cpuid.h index 0ae572da4..3983dfb89 100644 --- a/src/arch/x86/include/ipxe/cpuid.h +++ b/src/arch/x86/include/ipxe/cpuid.h @@ -42,6 +42,12 @@ struct x86_features { /** Hypervisor is present */ #define CPUID_FEATURES_INTEL_ECX_HYPERVISOR 0x80000000UL +/** TSC is present */ +#define CPUID_FEATURES_INTEL_EDX_TSC 0x00000010UL + +/** FXSAVE and FXRSTOR are supported */ +#define CPUID_FEATURES_INTEL_EDX_FXSR 0x01000000UL + /** Get largest extended function */ #define CPUID_AMD_MAX_FN 0x80000000UL diff --git a/src/arch/x86/include/ipxe/pcibios.h b/src/arch/x86/include/ipxe/pcibios.h index 7e1bcd814..bae4eede1 100644 --- a/src/arch/x86/include/ipxe/pcibios.h +++ b/src/arch/x86/include/ipxe/pcibios.h @@ -132,4 +132,17 @@ PCIAPI_INLINE ( pcbios, pci_write_config_dword ) ( struct pci_device *pci, return pcibios_write ( pci, PCIBIOS_WRITE_CONFIG_DWORD | where, value); } +/** + * Map PCI bus address as an I/O address + * + * @v bus_addr PCI bus address + * @v len Length of region + * @ret io_addr I/O address, or NULL on error + */ +static inline __always_inline void * +PCIAPI_INLINE ( pcbios, pci_ioremap ) ( struct pci_device *pci __unused, + unsigned long bus_addr, size_t len ) { + return ioremap ( bus_addr, len ); +} + #endif /* _IPXE_PCIBIOS_H */ diff --git a/src/arch/x86/include/ipxe/pcidirect.h b/src/arch/x86/include/ipxe/pcidirect.h index d924f2f20..decdc8100 100644 --- a/src/arch/x86/include/ipxe/pcidirect.h +++ b/src/arch/x86/include/ipxe/pcidirect.h @@ -32,8 +32,8 @@ extern void pcidirect_prepare ( struct pci_device *pci, int where ); */ static inline __always_inline int PCIAPI_INLINE ( direct, pci_num_bus ) ( void ) { - /* No way to work this out via Type 1 accesses */ - return 0x100; + /* Scan first bus and rely on bridge detection to find higher buses */ + return 1; } /** @@ -138,4 +138,17 @@ PCIAPI_INLINE ( direct, pci_write_config_dword ) ( struct pci_device *pci, return 0; } +/** + * Map PCI bus address as an I/O address + * + * @v bus_addr PCI bus address + * @v len Length of region + * @ret io_addr I/O address, or NULL on error + */ +static inline __always_inline void * +PCIAPI_INLINE ( direct, pci_ioremap ) ( struct pci_device *pci __unused, + unsigned long bus_addr, size_t len ) { + return ioremap ( bus_addr, len ); +} + #endif /* _PCIDIRECT_H */ diff --git a/src/arch/x86/include/ipxe/rsdp.h b/src/arch/x86/include/ipxe/rsdp.h index 7e32c0011..14afcd774 100644 --- a/src/arch/x86/include/ipxe/rsdp.h +++ b/src/arch/x86/include/ipxe/rsdp.h @@ -15,4 +15,17 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define ACPI_PREFIX_rsdp __rsdp_ #endif +/** + * Locate ACPI table + * + * @v signature Requested table signature + * @v index Requested index of table with this signature + * @ret table Table, or UNULL if not found + */ +static inline __attribute__ (( always_inline )) userptr_t +ACPI_INLINE ( rsdp, acpi_find ) ( uint32_t signature, unsigned int index ) { + + return acpi_find_via_rsdt ( signature, index ); +} + #endif /* _IPXE_RSDP_H */ diff --git a/src/arch/x86/interface/pcbios/bios_cachedhcp.c b/src/arch/x86/interface/pcbios/bios_cachedhcp.c new file mode 100644 index 000000000..277c40d6f --- /dev/null +++ b/src/arch/x86/interface/pcbios/bios_cachedhcp.c @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2013 Michael Brown . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include + +/** @file + * + * Cached DHCP packet + * + */ + +/** Cached DHCPACK physical address + * + * This can be set by the prefix. + */ +uint32_t __bss16 ( cached_dhcpack_phys ); +#define cached_dhcpack_phys __use_data16 ( cached_dhcpack_phys ) + +/** Colour for debug messages */ +#define colour &cached_dhcpack_phys + +/** + * Cached DHCPACK initialisation function + * + */ +static void cachedhcp_init ( void ) { + int rc; + + /* Do nothing if no cached DHCPACK is present */ + if ( ! cached_dhcpack_phys ) { + DBGC ( colour, "CACHEDHCP found no cached DHCPACK\n" ); + return; + } + + /* Record cached DHCPACK */ + if ( ( rc = cachedhcp_record ( &cached_dhcpack, + phys_to_user ( cached_dhcpack_phys ), + sizeof ( BOOTPLAYER_t ) ) ) != 0 ) { + DBGC ( colour, "CACHEDHCP could not record DHCPACK: %s\n", + strerror ( rc ) ); + return; + } + + /* Mark as consumed */ + cached_dhcpack_phys = 0; +} + +/** Cached DHCPACK initialisation function */ +struct init_fn cachedhcp_init_fn __init_fn ( INIT_NORMAL ) = { + .initialise = cachedhcp_init, +}; diff --git a/src/arch/x86/interface/pcbios/bios_console.c b/src/arch/x86/interface/pcbios/bios_console.c index 52a02fba5..80ebf330e 100644 --- a/src/arch/x86/interface/pcbios/bios_console.c +++ b/src/arch/x86/interface/pcbios/bios_console.c @@ -443,7 +443,7 @@ struct console_driver bios_console __console_driver = { * * @v ix86 Registers as passed to INT 16 */ -static __asmcall void bios_inject ( struct i386_all_regs *ix86 ) { +static __asmcall __used void bios_inject ( struct i386_all_regs *ix86 ) { unsigned int discard_a; unsigned int scancode; unsigned int i; diff --git a/src/arch/x86/interface/pcbios/e820mangler.S b/src/arch/x86/interface/pcbios/e820mangler.S index d5d97b482..296a6488b 100644 --- a/src/arch/x86/interface/pcbios/e820mangler.S +++ b/src/arch/x86/interface/pcbios/e820mangler.S @@ -67,7 +67,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) **************************************************************************** */ .section ".data16", "aw", @progbits - .align 16 + .balign 16 .globl hidemem_base .globl hidemem_umalloc .globl hidemem_textdata diff --git a/src/arch/x86/interface/pcbios/int13.c b/src/arch/x86/interface/pcbios/int13.c index ca789a0d1..d6c4d7ebf 100644 --- a/src/arch/x86/interface/pcbios/int13.c +++ b/src/arch/x86/interface/pcbios/int13.c @@ -678,10 +678,10 @@ static int int13_get_disk_type ( struct san_device *sandev, * @ret cx Extensions API support bitmap * @ret status Status code / API version */ -static int 
int13_extension_check ( struct san_device *sandev __unused, +static int int13_extension_check ( struct san_device *sandev, struct i386_all_regs *ix86 ) { - if ( ix86->regs.bx == 0x55aa ) { + if ( ( ix86->regs.bx == 0x55aa ) && ! int13_is_fdd ( sandev ) ) { DBGC2 ( sandev, "INT13 extensions installation check\n" ); ix86->regs.bx = 0xaa55; ix86->regs.cx = ( INT13_EXTENSION_LINEAR | @@ -1064,7 +1064,7 @@ static int int13_cdrom_read_boot_catalog ( struct san_device *sandev, * INT 13 handler * */ -static __asmcall void int13 ( struct i386_all_regs *ix86 ) { +static __asmcall __used void int13 ( struct i386_all_regs *ix86 ) { int command = ix86->regs.ah; unsigned int bios_drive = ix86->regs.dl; struct san_device *sandev; diff --git a/src/arch/x86/interface/pcbios/memtop_umalloc.c b/src/arch/x86/interface/pcbios/memtop_umalloc.c index f1ab73e29..1d3f40a1c 100644 --- a/src/arch/x86/interface/pcbios/memtop_umalloc.c +++ b/src/arch/x86/interface/pcbios/memtop_umalloc.c @@ -190,14 +190,14 @@ static userptr_t memtop_urealloc ( userptr_t ptr, size_t new_size ) { /* Expand/shrink block if possible */ if ( ptr == bottom ) { /* Update block */ - if ( new_size > ( heap_size - extmem.size ) ) { - DBG ( "EXTMEM out of space\n" ); - return UNULL; - } new = userptr_add ( ptr, - ( new_size - extmem.size ) ); align = ( user_to_phys ( new, 0 ) & ( EM_ALIGN - 1 ) ); new_size += align; new = userptr_add ( new, -align ); + if ( new_size > ( heap_size + extmem.size ) ) { + DBG ( "EXTMEM out of space\n" ); + return UNULL; + } DBG ( "EXTMEM expanding [%lx,%lx) to [%lx,%lx)\n", user_to_phys ( ptr, 0 ), user_to_phys ( ptr, extmem.size ), diff --git a/src/arch/x86/interface/pcbios/pcibios.c b/src/arch/x86/interface/pcbios/pcibios.c index 07ac0c18d..bf812f77f 100644 --- a/src/arch/x86/interface/pcbios/pcibios.c +++ b/src/arch/x86/interface/pcbios/pcibios.c @@ -121,3 +121,4 @@ PROVIDE_PCIAPI_INLINE ( pcbios, pci_read_config_dword ); PROVIDE_PCIAPI_INLINE ( pcbios, pci_write_config_byte ); PROVIDE_PCIAPI_INLINE ( pcbios, pci_write_config_word ); PROVIDE_PCIAPI_INLINE ( pcbios, pci_write_config_dword ); +PROVIDE_PCIAPI_INLINE ( pcbios, pci_ioremap ); diff --git a/src/arch/x86/interface/pcbios/rsdp.c b/src/arch/x86/interface/pcbios/rsdp.c index 8da0b5588..3c67b7525 100644 --- a/src/arch/x86/interface/pcbios/rsdp.c +++ b/src/arch/x86/interface/pcbios/rsdp.c @@ -123,3 +123,4 @@ static userptr_t rsdp_find_rsdt ( void ) { } PROVIDE_ACPI ( rsdp, acpi_find_rsdt, rsdp_find_rsdt ); +PROVIDE_ACPI_INLINE ( rsdp, acpi_find ); diff --git a/src/arch/x86/interface/pcbios/rtc_entropy.c b/src/arch/x86/interface/pcbios/rtc_entropy.c index e9e6baa59..e0c175685 100644 --- a/src/arch/x86/interface/pcbios/rtc_entropy.c +++ b/src/arch/x86/interface/pcbios/rtc_entropy.c @@ -36,6 +36,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include #include /** Maximum time to wait for an RTC interrupt, in milliseconds */ @@ -174,8 +175,17 @@ static int rtc_entropy_check ( void ) { * @ret rc Return status code */ static int rtc_entropy_enable ( void ) { + struct x86_features features; int rc; + /* Check that TSC is supported */ + x86_features ( &features ); + if ( ! 
( features.intel.edx & CPUID_FEATURES_INTEL_EDX_TSC ) ) { + DBGC ( &rtc_flag, "RTC has no TSC\n" ); + rc = -ENOTSUP; + goto err_no_tsc; + } + /* Hook ISR and enable RTC interrupts */ rtc_hook_isr(); enable_irq ( RTC_IRQ ); @@ -191,6 +201,7 @@ static int rtc_entropy_enable ( void ) { rtc_disable_int(); disable_irq ( RTC_IRQ ); rtc_unhook_isr(); + err_no_tsc: return rc; } diff --git a/src/arch/x86/interface/pxe/pxe_entry.S b/src/arch/x86/interface/pxe/pxe_entry.S index 663aa842e..3a5a100e3 100644 --- a/src/arch/x86/interface/pxe/pxe_entry.S +++ b/src/arch/x86/interface/pxe/pxe_entry.S @@ -34,7 +34,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) */ .section ".text16.data", "aw", @progbits .globl ppxe - .align 16 + .balign 16 ppxe: .ascii "!PXE" /* Signature */ .byte pxe_length /* StructLength */ @@ -72,7 +72,7 @@ undiheader: */ .section ".text16.data", "aw", @progbits .globl pxenv - .align 16 + .balign 16 pxenv: .ascii "PXENV+" /* Signature */ .word 0x0201 /* Version */ diff --git a/src/arch/x86/interface/syslinux/comboot_call.c b/src/arch/x86/interface/syslinux/comboot_call.c index e70f200e3..dc308dafe 100644 --- a/src/arch/x86/interface/syslinux/comboot_call.c +++ b/src/arch/x86/interface/syslinux/comboot_call.c @@ -220,7 +220,7 @@ static int comboot_fetch_kernel ( char *kernel_file, char *cmdline ) { /** * Terminate program interrupt handler */ -static __asmcall void int20 ( struct i386_all_regs *ix86 __unused ) { +static __asmcall __used void int20 ( struct i386_all_regs *ix86 __unused ) { rmlongjmp ( comboot_return, COMBOOT_EXIT ); } @@ -228,7 +228,7 @@ static __asmcall void int20 ( struct i386_all_regs *ix86 __unused ) { /** * DOS-compatible API */ -static __asmcall void int21 ( struct i386_all_regs *ix86 ) { +static __asmcall __used void int21 ( struct i386_all_regs *ix86 ) { ix86->flags |= CF; switch ( ix86->regs.ah ) { @@ -311,7 +311,7 @@ __weak int pxe_api_call_weak ( struct i386_all_regs *ix86 __unused ) { /** * SYSLINUX API */ -static __asmcall void int22 ( struct i386_all_regs *ix86 ) { +static __asmcall __used void int22 ( struct i386_all_regs *ix86 ) { ix86->flags |= CF; switch ( ix86->regs.ax ) { diff --git a/src/arch/x86/prefix/exeprefix.S b/src/arch/x86/prefix/exeprefix.S index c351456e2..0eab8c12a 100644 --- a/src/arch/x86/prefix/exeprefix.S +++ b/src/arch/x86/prefix/exeprefix.S @@ -110,7 +110,7 @@ overlay: /* Overlay number */ .word 0 - .align 16, 0 + .balign 16, 0 .globl _exe_start _exe_start: diff --git a/src/arch/x86/prefix/mromprefix.S b/src/arch/x86/prefix/mromprefix.S index 2b5c6bf64..d08284d7a 100644 --- a/src/arch/x86/prefix/mromprefix.S +++ b/src/arch/x86/prefix/mromprefix.S @@ -492,7 +492,7 @@ mromheader: .word 0 .size mromheader, . - mromheader - .align 4 + .balign 4 mpciheader: .ascii "PCIR" /* Signature */ .word pci_vendor_id /* Vendor identification */ diff --git a/src/arch/x86/prefix/rawprefix.S b/src/arch/x86/prefix/rawprefix.S new file mode 100644 index 000000000..4cf5f391e --- /dev/null +++ b/src/arch/x86/prefix/rawprefix.S @@ -0,0 +1,53 @@ +/* + * Raw binary prefix + * + * Assumes that entire image is already loaded as a contiguous block + * on a paragraph boundary and entered in real mode. 
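+ *
+ * ("Paragraph boundary" means a 16-byte boundary: the entry code
+ * below rebases %cs by adding the image's runtime offset divided by
+ * 16, so that %cs:0000 addresses _raw_start.)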
+ * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) + + .text + .arch i386 + .org 0 + .code16 + +#include + + .section ".prefix", "ax", @progbits + .globl _raw_start +_raw_start: + + /* Adjust %cs so that %cs:0000 is the start of the image */ + movw %cs, %ax + call 1f +1: popw %bx + subw $1b, %bx + shrw $4, %bx + addw %bx, %ax + pushw %ax + pushw $2f + lret +2: + /* Install iPXE */ + call install + + /* Set up real-mode stack */ + movw %bx, %ss + movw $_estack16, %sp + + /* Jump to .text16 segment */ + pushw %ax + pushw $1f + lret + .section ".text16", "awx", @progbits +1: + /* Run iPXE */ + virtcall main + + /* Uninstall iPXE */ + call uninstall + + /* Boot next device */ + int $0x18 diff --git a/src/arch/x86/prefix/romprefix.S b/src/arch/x86/prefix/romprefix.S index 3abef0eaf..a9934a725 100644 --- a/src/arch/x86/prefix/romprefix.S +++ b/src/arch/x86/prefix/romprefix.S @@ -88,7 +88,7 @@ checksum: .previous .ifeqs BUSTYPE, "PCIR" - .align 4 + .balign 4 pciheader: .ascii "PCIR" /* Signature */ .word pci_vendor_id /* Vendor identification */ @@ -136,7 +136,7 @@ pci_devlist_end: * BIOSes will scan on 16-byte boundaries rather than using * the offset stored at 0x1a */ - .align 16 + .balign 16 pnpheader: .ascii "$PnP" /* Signature */ .byte 0x01 /* Structure revision */ @@ -184,7 +184,7 @@ prodstr_pci_id: .globl undiheader .weak undiloader - .align 4 + .balign 4 undiheader: .ascii "UNDI" /* Signature */ .byte undiheader_len /* Length of structure */ @@ -199,7 +199,7 @@ undiheader: .equ undiheader_len, . - undiheader .size undiheader, . - undiheader - .align 4 + .balign 4 ipxeheader: .ascii "iPXE" /* Signature */ .byte ipxeheader_len /* Length of structure */ diff --git a/src/arch/x86/prefix/unlzma.S b/src/arch/x86/prefix/unlzma.S index ce18c756f..979f699ee 100644 --- a/src/arch/x86/prefix/unlzma.S +++ b/src/arch/x86/prefix/unlzma.S @@ -44,7 +44,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); */ .text - .arch i586 + .arch i486 .section ".prefix.lib", "ax", @progbits #ifdef CODE16 @@ -231,7 +231,7 @@ rep_len_dec: .space sizeof__lzma_len_dec literal: .rept ( ( 1 << LZMA_LC ) * 0x300 ) .word 0 .endr - .align 4 + .balign 4 .equ sizeof__lzma_dec, . 
- lzma_dec .previous diff --git a/src/arch/x86/prefix/usbdisk.S b/src/arch/x86/prefix/usbdisk.S index 9676406e2..977de6dd6 100644 --- a/src/arch/x86/prefix/usbdisk.S +++ b/src/arch/x86/prefix/usbdisk.S @@ -1,5 +1,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) +#include + .text .arch i386 .section ".prefix", "awx", @progbits @@ -9,26 +11,68 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) #include "mbr.S" /* Partition table: 64 heads, 32 sectors/track (ZIP-drive compatible) */ +#define HEADS 64 +#define SECTORS 32 +#define CYLADDR(cyl) ((((cyl) * HEADS + (((cyl) == 0) & 1)) * SECTORS) * 512) + +#ifdef CONSOLE_INT13 +#define LOGPART 1 +#define LOGSTART 0 +#define LOGCOUNT 1 +#define BOOTSTART 1 +#define BOOTCOUNT 2 +#else /* CONSOLE_INT13 */ +#define LOGPART 0 +#define BOOTSTART 0 +#define BOOTCOUNT 2 +#endif /* CONSOLE_INT13 */ + + /* Construct a C/H/S address */ + .macro chs cylinder, head, sector + .byte \head + .byte (((\cylinder & 0x300) >> 2) | \sector) + .byte (\cylinder & 0x0ff) + .endm + + /* Construct a linear address */ + .macro linear cylinders, heads, sectors + .long ((((\cylinders * HEADS) + \heads) * SECTORS) + \sectors - 1) + .endm + + /* Construct a partition table entry */ + .macro partition bootflag, type, start, count + .byte \bootflag + chs \start, ((\start == 0) & 1), 1 + .byte \type + chs (\start + \count - 1), (HEADS - 1), SECTORS + linear \start, ((\start == 0) & 1), 1 + linear \count, 0, (1 - (((\start == 0) & 1) * SECTORS)) + .endm + + /* Partition table */ .org 446 .space 16 .space 16 - /* Partition 3: log partition (for CONSOLE_INT13) */ - .byte 0x00, 0x01, 0x01, 0x00 - .byte 0xe0, 0x3f, 0x20, 0x00 - .long 0x00000020 - .long 0x000007e0 - /* Partition 4: boot partition */ - .byte 0x80, 0x00, 0x01, 0x01 - .byte 0xeb, 0x3f, 0x20, 0x02 - .long 0x00000800 - .long 0x00001000 + /* Partition 3: log partition (for CONSOLE_INT13) */ + .if LOGPART + partition 0x00, 0xe0, LOGSTART, LOGCOUNT + .else + .space 16 + .endif + + /* Partition 4: boot partition */ + partition 0x80, 0xeb, BOOTSTART, BOOTCOUNT + + /* Disk signature */ .org 510 .byte 0x55, 0xaa /* Skip to start of log partition */ - .org 32 * 512 + .if LOGPART + .org CYLADDR(LOGSTART) .ascii "iPXE LOG\n\n" + .endif /* Skip to start of boot partition */ - .org 2048 * 512 + .org CYLADDR(BOOTSTART) diff --git a/src/arch/x86/scripts/pcbios.lds b/src/arch/x86/scripts/pcbios.lds index c9a91c02b..de59adca9 100644 --- a/src/arch/x86/scripts/pcbios.lds +++ b/src/arch/x86/scripts/pcbios.lds @@ -58,11 +58,12 @@ SECTIONS { *(SORT(.pci_devlist.*)) *(.prefix.*) _mprefix = .; - } .bss.prefix (NOLOAD) : AT ( _end_lma ) { + } .bss.prefix (NOLOAD) : AT ( _bss_prefix_lma ) { _eprefix = .; } _prefix_filesz = ABSOLUTE ( _mprefix ) - ABSOLUTE ( _prefix ); _prefix_memsz = ABSOLUTE ( _eprefix ) - ABSOLUTE ( _prefix ); + _prefix_padsz = ABSOLUTE ( _eprefix ) - ABSOLUTE ( _mprefix ); /* * The 16-bit (real-mode) code section @@ -82,7 +83,7 @@ SECTIONS { *(.text16) *(.text16.*) _mtext16 = .; - } .bss.text16 (NOLOAD) : AT ( _end_lma ) { + } .bss.text16 (NOLOAD) : AT ( _bss_text16_lma ) { _etext16 = .; } _text16_early_filesz = ABSOLUTE ( _etext16_early ) - ABSOLUTE ( _text16 ); @@ -90,6 +91,7 @@ SECTIONS { _text16_late_filesz = ABSOLUTE ( _mtext16 ) - ABSOLUTE ( _text16_late ); _text16_late_memsz = ABSOLUTE ( _etext16 ) - ABSOLUTE ( _text16_late ); _text16_memsz = ABSOLUTE ( _etext16 ) - ABSOLUTE ( _text16 ); + _text16_padsz = ABSOLUTE ( _etext16 ) - ABSOLUTE ( _mtext16 ); /* * The 16-bit (real-mode) data section @@ -104,7 +106,7 @@ SECTIONS { *(.data16) 
*(.data16.*) _mdata16 = .; - } .bss.data16 (NOLOAD) : AT ( _end_lma ) { + } .bss.data16 (NOLOAD) : AT ( _bss_data16_lma ) { *(.bss16) *(.bss16.*) *(.stack16) @@ -114,6 +116,7 @@ SECTIONS { } _data16_filesz = ABSOLUTE ( _mdata16 ) - ABSOLUTE ( _data16 ); _data16_memsz = ABSOLUTE ( _edata16 ) - ABSOLUTE ( _data16 ); + _data16_padsz = ABSOLUTE ( _edata16 ) - ABSOLUTE ( _mdata16 ); /* * The 32-bit sections @@ -135,7 +138,7 @@ SECTIONS { KEEP(*(.provided)) KEEP(*(.provided.*)) _mtextdata = .; - } .bss.textdata (NOLOAD) : AT ( _end_lma ) { + } .bss.textdata (NOLOAD) : AT ( _bss_textdata_lma ) { *(.bss) *(.bss.*) *(COMMON) @@ -157,6 +160,7 @@ SECTIONS { } _textdata_filesz = ABSOLUTE ( _mtextdata ) - ABSOLUTE ( _textdata ); _textdata_memsz = ABSOLUTE ( _etextdata ) - ABSOLUTE ( _textdata ); + _textdata_padsz = ABSOLUTE ( _etextdata ) - ABSOLUTE ( _mtextdata ); /* * Payload prefix @@ -169,11 +173,12 @@ SECTIONS { KEEP(*(.pprefix)) KEEP(*(.pprefix.*)) _mpprefix = .; - } .bss.pprefix (NOLOAD) : AT ( _end_lma ) { + } .bss.pprefix (NOLOAD) : AT ( _bss_pprefix_lma ) { _epprefix = .; } _pprefix_filesz = ABSOLUTE ( _mpprefix ) - ABSOLUTE ( _pprefix ); _pprefix_memsz = ABSOLUTE ( _epprefix ) - ABSOLUTE ( _pprefix ); + _pprefix_padsz = ABSOLUTE ( _epprefix ) - ABSOLUTE ( _mpprefix ); /* * Compressor information block @@ -185,11 +190,12 @@ SECTIONS { KEEP(*(.zinfo)) KEEP(*(.zinfo.*)) _mzinfo = .; - } .bss.zinfo (NOLOAD) : AT ( _end_lma ) { + } .bss.zinfo (NOLOAD) : AT ( _bss_zinfo_lma ) { _ezinfo = .; } _zinfo_filesz = ABSOLUTE ( _mzinfo ) - ABSOLUTE ( _zinfo ); _zinfo_memsz = ABSOLUTE ( _ezinfo ) - ABSOLUTE ( _zinfo ); + _zinfo_padsz = ABSOLUTE ( _ezinfo ) - ABSOLUTE ( _mzinfo ); /* * Weak symbols that need zero values if not otherwise defined @@ -235,36 +241,65 @@ SECTIONS { . = ALIGN ( _max_align ); _prefix_lma = .; - . += _prefix_filesz; + . += ABSOLUTE ( _prefix_filesz ); . = ALIGN ( _max_align ); _text16_early_lma = .; - . += _text16_early_filesz; + . += ABSOLUTE ( _text16_early_filesz ); . = ALIGN ( _max_align ); . = ALIGN ( _payload_align ); _pprefix_lma = .; - . += _pprefix_filesz; + . += ABSOLUTE ( _pprefix_filesz ); . = ALIGN ( _max_align ); _payload_lma = .; _pprefix_skip = ABSOLUTE ( _payload_lma ) - ABSOLUTE ( _pprefix_lma ); _text16_late_lma = .; - . += _text16_late_filesz; + . += ABSOLUTE ( _text16_late_filesz ); . = ALIGN ( _max_align ); _data16_lma = .; - . += _data16_filesz; + . += ABSOLUTE ( _data16_filesz ); . = ALIGN ( _max_align ); _textdata_lma = .; - . += _textdata_filesz; + . += ABSOLUTE ( _textdata_filesz ); - _filesz = .; /* Do not include zinfo block in file size */ + _filesz = .; /* Do not include .bss.* or .zinfo in file size */ + + /* + * Dummy load addresses for .bss.* and .zinfo sections + * + */ + + . = ALIGN ( _max_align ); + _bss_prefix_lma = .; + . += ABSOLUTE ( _prefix_padsz ); + + . = ALIGN ( _max_align ); + _bss_text16_lma = .; + . += ABSOLUTE ( _text16_padsz ); + + . = ALIGN ( _max_align ); + _bss_data16_lma = .; + . += ABSOLUTE ( _data16_padsz ); + + . = ALIGN ( _max_align ); + _bss_textdata_lma = .; + . += ABSOLUTE ( _textdata_padsz ); + + . = ALIGN ( _max_align ); + _bss_pprefix_lma = .; + . += ABSOLUTE ( _pprefix_padsz ); + + . = ALIGN ( _max_align ); + _bss_zinfo_lma = .; + . += ABSOLUTE ( _zinfo_padsz ); . = ALIGN ( _max_align ); _zinfo_lma = .; - . += _zinfo_filesz; + . += ABSOLUTE ( _zinfo_filesz ); . 
= ALIGN ( _max_align ); _end_lma = .; diff --git a/src/arch/x86/scripts/prefixonly.lds b/src/arch/x86/scripts/prefixonly.lds new file mode 100644 index 000000000..dce0930b5 --- /dev/null +++ b/src/arch/x86/scripts/prefixonly.lds @@ -0,0 +1,29 @@ +/* -*- ld-script -*- */ + +/* + * Linker script for prefix-only binaries (e.g. USB disk MBR) + * + */ + +SECTIONS { + + .prefix 0x0 : AT ( 0x0 ) { + *(.prefix) + } + + /DISCARD/ : { + *(.comment) + *(.comment.*) + *(.note) + *(.note.*) + *(.eh_frame) + *(.eh_frame.*) + *(.rel) + *(.rel.*) + *(.einfo) + *(.einfo.*) + *(.discard) + *(.discard.*) + } + +} diff --git a/src/arch/x86/transitions/liba20.S b/src/arch/x86/transitions/liba20.S index 6c1e1f62f..57603353e 100644 --- a/src/arch/x86/transitions/liba20.S +++ b/src/arch/x86/transitions/liba20.S @@ -285,7 +285,7 @@ enable_a20: ret .section ".text16.early.data", "aw", @progbits - .align 2 + .balign 2 enable_a20_method: .word 0 .size enable_a20_method, . - enable_a20_method diff --git a/src/arch/x86/transitions/librm.S b/src/arch/x86/transitions/librm.S index 9d3eff954..5dacb9b04 100644 --- a/src/arch/x86/transitions/librm.S +++ b/src/arch/x86/transitions/librm.S @@ -99,7 +99,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ) **************************************************************************** */ .section ".data16.gdt", "aw", @progbits - .align 16 + .balign 16 gdt: gdtr: /* The first GDT entry is unused, the GDTR can fit here. */ gdt_limit: .word gdt_length - 1 @@ -210,9 +210,7 @@ VC_TMP_CR3: .space 4 VC_TMP_CR4: .space 4 VC_TMP_EMER: .space 8 .endif -#ifdef TIVOLI_VMM_WORKAROUND VC_TMP_FXSAVE: .space 512 -#endif VC_TMP_END: .previous @@ -224,7 +222,7 @@ RC_TMP_END: /* Shared temporary static buffer */ .section ".bss16.rm_tmpbuf", "aw", @nobits - .align 16 + .balign 16 rm_tmpbuf: .space VC_TMP_END .size rm_tmpbuf, . 
- rm_tmpbuf @@ -350,6 +348,13 @@ init_librm_rmode: /* Initialise IDT */ virtcall init_idt +#ifdef TIVOLI_VMM_WORKAROUND + /* Check for FXSAVE/FXRSTOR */ + clc + virtcall check_fxsr + setnc fxsr_supported +#endif + /* Restore registers */ popl %edi popl %ebx @@ -366,6 +371,10 @@ set_seg_base: roll $16, %eax ret + .section ".data16.fxsr_supported", "awx", @progbits +fxsr_supported: /* FXSAVE/FXRSTOR instructions supported */ + .byte 0 + /**************************************************************************** * real_to_prot (real-mode near call, 32-bit virtual return address) * @@ -1007,10 +1016,11 @@ virt_call: cli movw %cs:rm_ds, %ds -#ifdef TIVOLI_VMM_WORKAROUND /* Preserve FPU, MMX and SSE state in temporary static buffer */ + testb $0xff, fxsr_supported + jz 1f fxsave ( rm_tmpbuf + VC_TMP_FXSAVE ) -#endif +1: /* Preserve GDT and IDT in temporary static buffer */ sidt ( rm_tmpbuf + VC_TMP_IDT ) sgdt ( rm_tmpbuf + VC_TMP_GDT ) @@ -1077,10 +1087,11 @@ vc_rmode: wrmsr .endif -#ifdef TIVOLI_VMM_WORKAROUND /* Restore FPU, MMX and SSE state from temporary static buffer */ + testb $0xff, fxsr_supported + jz 1f fxrstor ( rm_tmpbuf + VC_TMP_FXSAVE ) -#endif +1: /* Restore registers and flags and return */ popl %eax /* skip %cs and %ss */ popw %ds @@ -1470,7 +1481,7 @@ interrupt_wrapper: **************************************************************************** */ .section ".pages", "aw", @nobits - .align SIZEOF_PT + .balign SIZEOF_PT /* Page map level 4 entries (PML4Es) * diff --git a/src/arch/x86/transitions/librm_mgmt.c b/src/arch/x86/transitions/librm_mgmt.c index f9e1d261a..da221e8b1 100644 --- a/src/arch/x86/transitions/librm_mgmt.c +++ b/src/arch/x86/transitions/librm_mgmt.c @@ -14,6 +14,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include /* * This file provides functions for managing librm. @@ -118,7 +119,7 @@ void set_interrupt_vector ( unsigned int intr, void *vector ) { * Initialise interrupt descriptor table * */ -void init_idt ( void ) { +__asmcall void init_idt ( void ) { struct interrupt_vector *vec; unsigned int intr; @@ -386,6 +387,21 @@ static void iounmap_pages ( volatile const void *io_addr ) { io_addr, first, i ); } +/** + * Check for FXSAVE/FXRSTOR instruction support + * + */ +__asmcall void check_fxsr ( struct i386_all_regs *regs ) { + struct x86_features features; + + /* Check for FXSR bit */ + x86_features ( &features ); + if ( ! ( features.intel.edx & CPUID_FEATURES_INTEL_EDX_FXSR ) ) + regs->flags |= CF; + DBGC ( &features, "FXSAVE/FXRSTOR is%s supported\n", + ( ( regs->flags & CF ) ? 
" not" : "" ) ); +} + PROVIDE_UACCESS_INLINE ( librm, phys_to_user ); PROVIDE_UACCESS_INLINE ( librm, user_to_phys ); PROVIDE_UACCESS_INLINE ( librm, virt_to_user ); diff --git a/src/arch/x86/transitions/librm_test.c b/src/arch/x86/transitions/librm_test.c index 77cf8022c..347d86a15 100644 --- a/src/arch/x86/transitions/librm_test.c +++ b/src/arch/x86/transitions/librm_test.c @@ -58,7 +58,8 @@ static struct profiler virt_call_profiler __profiler = { .name = "virt_call" }; /** * Dummy function for profiling tests */ -static __asmcall void librm_test_call ( struct i386_all_regs *ix86 __unused ) { +static __asmcall __used void +librm_test_call ( struct i386_all_regs *ix86 __unused ) { /* Do nothing */ } diff --git a/src/arch/x86_64/Makefile.linux b/src/arch/x86_64/Makefile.linux index 154f9d40d..c41ee49df 100644 --- a/src/arch/x86_64/Makefile.linux +++ b/src/arch/x86_64/Makefile.linux @@ -1,6 +1,10 @@ +# -*- makefile -*- : Force emacs to use Makefile mode + +# Linker script +# LDSCRIPT = arch/x86_64/scripts/linux.lds -SRCDIRS += arch/x86_64/core/linux - +# Include generic Linux Makefile +# MAKEDEPS += arch/x86/Makefile.linux include arch/x86/Makefile.linux diff --git a/src/arch/x86_64/core/linux/linux_syscall.S b/src/arch/x86_64/core/linux/linux_syscall.S deleted file mode 100644 index d2805f94c..000000000 --- a/src/arch/x86_64/core/linux/linux_syscall.S +++ /dev/null @@ -1,33 +0,0 @@ - - .section ".data" - .globl linux_errno - -linux_errno: .int 0 - - .section ".text" - .code64 - .globl linux_syscall - .type linux_syscall, @function - -linux_syscall: - movq %rdi, %rax // C arg1 -> syscall number - movq %rsi, %rdi // C arg2 -> syscall arg1 - movq %rdx, %rsi // C arg3 -> syscall arg2 - movq %rcx, %rdx // C arg4 -> syscall arg3 - movq %r8, %r10 // C arg5 -> syscall arg4 - movq %r9, %r8 // C arg6 -> syscall arg5 - movq 8(%rsp), %r9 // C arg7 -> syscall arg6 - - syscall - - cmpq $-4095, %rax - jae 1f - ret - -1: - negq %rax - movl %eax, linux_errno - movq $-1, %rax - ret - - .size linux_syscall, . - linux_syscall diff --git a/src/arch/x86_64/core/linux/linuxprefix.S b/src/arch/x86_64/core/linux/linuxprefix.S deleted file mode 100644 index ec8a9decd..000000000 --- a/src/arch/x86_64/core/linux/linuxprefix.S +++ /dev/null @@ -1,25 +0,0 @@ -#include - - .section ".text" - .code64 - .globl _linux_start - .type _linux_start, @function - -_linux_start: - xorq %rbp, %rbp - - popq %rdi // argc -> C arg1 - movq %rsp, %rsi // argv -> C arg2 - - andq $~15, %rsp // 16-byte align the stack - - call save_args - - /* Our main doesn't use any arguments */ - call main - - movq %rax, %rdi // rc -> syscall arg1 - movq $__NR_exit, %rax - syscall - - .size _linux_start, . 
- _linux_start diff --git a/src/arch/x86_64/include/bits/compiler.h b/src/arch/x86_64/include/bits/compiler.h index 46985da3e..5129f90d0 100644 --- a/src/arch/x86_64/include/bits/compiler.h +++ b/src/arch/x86_64/include/bits/compiler.h @@ -9,7 +9,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #ifndef ASSEMBLY /** Declare a function with standard calling conventions */ -#define __asmcall __attribute__ (( used, regparm(0) )) +#define __asmcall __attribute__ (( regparm(0) )) /** Declare a function with libgcc implicit linkage */ #define __libgcc diff --git a/src/arch/x86_64/include/bits/linux_api.h b/src/arch/x86_64/include/bits/linux_api.h deleted file mode 100644 index 589fb5808..000000000 --- a/src/arch/x86_64/include/bits/linux_api.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _X86_64_LINUX_API_H -#define _X86_64_LINUX_API_H - -#define __SYSCALL_mmap __NR_mmap - -#endif /* _X86_64_LINUX_API_H */ diff --git a/src/config/cloud/aws.ipxe b/src/config/cloud/aws.ipxe index 2c96e3888..6c007398e 100644 --- a/src/config/cloud/aws.ipxe +++ b/src/config/cloud/aws.ipxe @@ -3,6 +3,22 @@ echo Amazon EC2 - iPXE boot via user-data echo CPU: ${cpuvendor} ${cpumodel} ifstat || -dhcp || + +set attempt:int8 1 +:dhcp_retry +echo DHCP attempt ${attempt} +dhcp --timeout 5000 && goto dhcp_ok || +ifstat || +inc attempt +iseq ${attempt} 10 || goto dhcp_retry + +:dhcp_fail +echo DHCP failed - rebooting +reboot || +exit + +:dhcp_ok route || -chain -ar http://169.254.169.254/latest/user-data +chain -ar http://169.254.169.254/latest/user-data || +ifstat || +exit diff --git a/src/config/cloud/console.h b/src/config/cloud/console.h index dae18e556..83318dd30 100644 --- a/src/config/cloud/console.h +++ b/src/config/cloud/console.h @@ -18,8 +18,13 @@ * Note that the serial port output from an AWS EC2 virtual machine is * generally available (as the "System Log") only after the instance * has been stopped. + * + * Enable only for non-EFI builds, on the assumption that the standard + * EFI firmware is likely to already be logging to the serial port. */ +#ifndef PLATFORM_efi #define CONSOLE_SERIAL +#endif /* Log to partition on local disk * diff --git a/src/config/cloud/gce.ipxe b/src/config/cloud/gce.ipxe index 88e12b56b..65e2e57dc 100644 --- a/src/config/cloud/gce.ipxe +++ b/src/config/cloud/gce.ipxe @@ -5,4 +5,5 @@ echo CPU: ${cpuvendor} ${cpumodel} ifstat || dhcp || route || -chain -ar http://metadata.google.internal/computeMetadata/v1/instance/attributes/ipxeboot +chain -ar http://metadata.google.internal/computeMetadata/v1/instance/attributes/ipxeboot || +ifstat || diff --git a/src/config/cloud/general.h b/src/config/cloud/general.h index 99028c147..fc881163a 100644 --- a/src/config/cloud/general.h +++ b/src/config/cloud/general.h @@ -1,4 +1,13 @@ +/* Enable IPv6 and HTTPS */ +#define NET_PROTO_IPV6 +#define DOWNLOAD_PROTO_HTTPS + /* Allow retrieval of metadata (such as an iPXE boot script) from * Google Compute Engine metadata server. */ #define HTTP_HACK_GCE + +/* Allow scripts to handle errors by powering down the VM to avoid + * incurring unnecessary costs. + */ +#define POWEROFF_CMD diff --git a/src/config/cloud/ioapi.h b/src/config/cloud/ioapi.h new file mode 100644 index 000000000..c7c917f2e --- /dev/null +++ b/src/config/cloud/ioapi.h @@ -0,0 +1,7 @@ +/* Work around missing PCI BIOS calls in the cut-down SeaBIOS found in + * some AWS EC2 instances. 
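+ *
+ * Type 1 ("direct") accesses use I/O ports 0xcf8/0xcfc and therefore
+ * do not rely on the INT 1A PCI BIOS services.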
+ */ +#ifdef PLATFORM_pcbios +#undef PCIAPI_PCBIOS +#define PCIAPI_DIRECT +#endif diff --git a/src/config/cloud/settings.h b/src/config/cloud/settings.h index 34deeb070..c9d6bdc93 100644 --- a/src/config/cloud/settings.h +++ b/src/config/cloud/settings.h @@ -1,4 +1,6 @@ /* It can often be useful to know the CPU on which a cloud instance is * running (e.g. to isolate problems with Azure AMD instances). */ +#if defined ( __i386__ ) || defined ( __x86_64__ ) #define CPUID_SETTINGS +#endif diff --git a/src/config/config.c b/src/config/config.c index 2ca05dff7..a81866132 100644 --- a/src/config/config.c +++ b/src/config/config.c @@ -182,6 +182,12 @@ REQUIRE_OBJECT ( efi_image ); #ifdef IMAGE_SDI REQUIRE_OBJECT ( sdi ); #endif +#ifdef IMAGE_ZLIB +REQUIRE_OBJECT ( zlib ); +#endif +#ifdef IMAGE_GZIP +REQUIRE_OBJECT ( gzip ); +#endif /* * Drag in all requested commands @@ -281,6 +287,9 @@ REQUIRE_OBJECT ( ntp_cmd ); #ifdef CERT_CMD REQUIRE_OBJECT ( cert_cmd ); #endif +#ifdef IMAGE_MEM_CMD +REQUIRE_OBJECT ( image_mem_cmd ); +#endif /* * Drag in miscellaneous objects diff --git a/src/config/config_archive.c b/src/config/config_archive.c new file mode 100644 index 000000000..746fc7e44 --- /dev/null +++ b/src/config/config_archive.c @@ -0,0 +1,36 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +/** @file + * + * Archive image configuration + * + */ + +PROVIDE_REQUIRING_SYMBOL(); + +#ifdef IMAGE_ARCHIVE_CMD +REQUIRE_OBJECT ( image_archive_cmd ); +#endif diff --git a/src/config/config_crypto.c b/src/config/config_crypto.c index 1e125d8ab..440bf4ce1 100644 --- a/src/config/config_crypto.c +++ b/src/config/config_crypto.c @@ -33,6 +33,56 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); PROVIDE_REQUIRING_SYMBOL(); +/* RSA */ +#if defined ( CRYPTO_PUBKEY_RSA ) +REQUIRE_OBJECT ( oid_rsa ); +#endif + +/* MD4 */ +#if defined ( CRYPTO_DIGEST_MD4 ) +REQUIRE_OBJECT ( oid_md4 ); +#endif + +/* MD5 */ +#if defined ( CRYPTO_DIGEST_MD5 ) +REQUIRE_OBJECT ( oid_md5 ); +#endif + +/* SHA-1 */ +#if defined ( CRYPTO_DIGEST_SHA1 ) +REQUIRE_OBJECT ( oid_sha1 ); +#endif + +/* SHA-224 */ +#if defined ( CRYPTO_DIGEST_SHA224 ) +REQUIRE_OBJECT ( oid_sha224 ); +#endif + +/* SHA-256 */ +#if defined ( CRYPTO_DIGEST_SHA256 ) +REQUIRE_OBJECT ( oid_sha256 ); +#endif + +/* SHA-384 */ +#if defined ( CRYPTO_DIGEST_SHA384 ) +REQUIRE_OBJECT ( oid_sha384 ); +#endif + +/* SHA-512 */ +#if defined ( CRYPTO_DIGEST_SHA512 ) +REQUIRE_OBJECT ( oid_sha512 ); +#endif + +/* SHA-512/224 */ +#if defined ( CRYPTO_DIGEST_SHA512_224 ) +REQUIRE_OBJECT ( oid_sha512_224 ); +#endif + +/* SHA-512/256 */ +#if defined ( CRYPTO_DIGEST_SHA512_256 ) +REQUIRE_OBJECT ( oid_sha512_256 ); +#endif + /* RSA and MD5 */ #if defined ( CRYPTO_PUBKEY_RSA ) && defined ( CRYPTO_DIGEST_MD5 ) REQUIRE_OBJECT ( rsa_md5 ); diff --git a/src/config/config_ethernet.c b/src/config/config_ethernet.c index b5f7ddc9d..8a663c923 100644 --- a/src/config/config_ethernet.c +++ b/src/config/config_ethernet.c @@ -46,3 +46,6 @@ REQUIRE_OBJECT ( stp ); #ifdef NET_PROTO_LACP REQUIRE_OBJECT ( eth_slow ); #endif +#ifdef NET_PROTO_EAPOL +REQUIRE_OBJECT ( eapol ); +#endif diff --git a/src/config/config_usb.c b/src/config/config_usb.c index 17296d277..b679aeb27 100644 --- a/src/config/config_usb.c +++ b/src/config/config_usb.c @@ -53,6 +53,9 @@ REQUIRE_OBJECT ( usbio ); #ifdef USB_KEYBOARD REQUIRE_OBJECT ( usbkbd ); #endif +#ifdef USB_BLOCK +REQUIRE_OBJECT ( usbblk ); +#endif /* * Drag in USB external interfaces diff --git a/src/config/crypto.h b/src/config/crypto.h index 1edcdce45..7c0251758 100644 --- a/src/config/crypto.h +++ b/src/config/crypto.h @@ -9,31 +9,28 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +/** Minimum TLS version */ +#define TLS_VERSION_MIN TLS_VERSION_TLS_1_1 + /** RSA public-key algorithm */ #define CRYPTO_PUBKEY_RSA /** AES-CBC block cipher */ #define CRYPTO_CIPHER_AES_CBC -/** MD5 digest algorithm - * - * Note that use of MD5 is implicit when using TLSv1.1 or earlier. - */ -#define CRYPTO_DIGEST_MD5 +/** MD4 digest algorithm */ +//#define CRYPTO_DIGEST_MD4 -/** SHA-1 digest algorithm - * - * Note that use of SHA-1 is implicit when using TLSv1.1 or earlier. - */ +/** MD5 digest algorithm */ +//#define CRYPTO_DIGEST_MD5 + +/** SHA-1 digest algorithm */ #define CRYPTO_DIGEST_SHA1 /** SHA-224 digest algorithm */ #define CRYPTO_DIGEST_SHA224 -/** SHA-256 digest algorithm - * - * Note that use of SHA-256 is implicit when using TLSv1.2. 
- */ +/** SHA-256 digest algorithm */ #define CRYPTO_DIGEST_SHA256 /** SHA-384 digest algorithm */ @@ -42,6 +39,12 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); /** SHA-512 digest algorithm */ #define CRYPTO_DIGEST_SHA512 +/** SHA-512/224 digest algorithm */ +//#define CRYPTO_DIGEST_SHA512_224 + +/** SHA-512/256 digest algorithm */ +//#define CRYPTO_DIGEST_SHA512_256 + /** Margin of error (in seconds) allowed in signed timestamps * * We default to allowing a reasonable margin of error: 12 hours to diff --git a/src/config/defaults/efi.h b/src/config/defaults/efi.h index 53a7a7b4b..9ef34ab62 100644 --- a/src/config/defaults/efi.h +++ b/src/config/defaults/efi.h @@ -12,6 +12,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define UACCESS_EFI #define IOMAP_VIRT #define PCIAPI_EFI +#define DMAAPI_OP #define CONSOLE_EFI #define TIMER_EFI #define UMALLOC_EFI @@ -24,6 +25,8 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define ACPI_EFI #define FDT_EFI +#define NET_PROTO_IPV6 /* IPv6 protocol */ + #define DOWNLOAD_PROTO_FILE /* Local filesystem access */ #define IMAGE_EFI /* EFI image support */ @@ -39,6 +42,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define USB_HCD_EHCI /* EHCI USB host controller */ #define USB_HCD_UHCI /* UHCI USB host controller */ #define USB_EFI /* Provide EFI_USB_IO_PROTOCOL interface */ +#define USB_BLOCK /* USB block devices */ #define REBOOT_CMD /* Reboot command */ @@ -46,6 +50,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define IOAPI_X86 #define NAP_EFIX86 #define CPUID_CMD /* x86 CPU feature detection command */ +#define UNSAFE_STD /* Avoid setting direction flag */ #endif #if defined ( __arm__ ) || defined ( __aarch64__ ) diff --git a/src/config/defaults/linux.h b/src/config/defaults/linux.h index 75fd617f9..5c4106d30 100644 --- a/src/config/defaults/linux.h +++ b/src/config/defaults/linux.h @@ -20,6 +20,8 @@ FILE_LICENCE ( GPL2_OR_LATER ); #define TIME_LINUX #define REBOOT_NULL #define PCIAPI_LINUX +#define DMAAPI_FLAT +#define ACPI_LINUX #define DRIVERS_LINUX diff --git a/src/config/defaults/pcbios.h b/src/config/defaults/pcbios.h index 21821c95c..83835805a 100644 --- a/src/config/defaults/pcbios.h +++ b/src/config/defaults/pcbios.h @@ -12,6 +12,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define UACCESS_LIBRM #define IOAPI_X86 #define PCIAPI_PCBIOS +#define DMAAPI_FLAT #define TIMER_PCBIOS #define CONSOLE_PCBIOS #define NAP_PCBIOS @@ -48,6 +49,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define USB_HCD_EHCI /* EHCI USB host controller */ #define USB_HCD_UHCI /* UHCI USB host controller */ #define USB_KEYBOARD /* USB keyboards */ +#define USB_BLOCK /* USB block devices */ #define REBOOT_CMD /* Reboot command */ #define CPUID_CMD /* x86 CPU feature detection command */ diff --git a/src/config/dhcp.h b/src/config/dhcp.h index bff5b56d6..adfa74a15 100644 --- a/src/config/dhcp.h +++ b/src/config/dhcp.h @@ -28,7 +28,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); * Maximum number of discovery deferrals due to blocked links * (e.g. 
from non-forwarding STP ports) */ -#define DHCP_DISC_MAX_DEFERRALS 60 +#define DHCP_DISC_MAX_DEFERRALS 180 /* * ProxyDHCP offers are given precedence by continue to wait for them diff --git a/src/config/general.h b/src/config/general.h index 3c14a2cd0..2d15f500a 100644 --- a/src/config/general.h +++ b/src/config/general.h @@ -35,10 +35,11 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); */ #define NET_PROTO_IPV4 /* IPv4 protocol */ -#undef NET_PROTO_IPV6 /* IPv6 protocol */ +//#define NET_PROTO_IPV6 /* IPv6 protocol */ #undef NET_PROTO_FCOE /* Fibre Channel over Ethernet protocol */ #define NET_PROTO_STP /* Spanning Tree protocol */ #define NET_PROTO_LACP /* Link Aggregation control protocol */ +#define NET_PROTO_EAPOL /* EAP over LAN protocol */ /* * PXE support @@ -116,6 +117,8 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define IMAGE_PNG /* PNG image support */ #define IMAGE_DER /* DER image support */ #define IMAGE_PEM /* PEM image support */ +//#define IMAGE_ZLIB /* ZLIB image support */ +//#define IMAGE_GZIP /* GZIP image support */ /* * Command-line commands to include @@ -154,6 +157,8 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); //#define PROFSTAT_CMD /* Profiling commands */ //#define NTP_CMD /* NTP commands */ //#define CERT_CMD /* Certificate management commands */ +//#define IMAGE_MEM_CMD /* Read memory command */ +#define IMAGE_ARCHIVE_CMD /* Archive image management commands */ /* * ROM-specific options diff --git a/src/config/ioapi.h b/src/config/ioapi.h index abe5a50ce..a1498482d 100644 --- a/src/config/ioapi.h +++ b/src/config/ioapi.h @@ -14,6 +14,9 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); //#undef PCIAPI_PCBIOS /* Access via PCI BIOS */ //#define PCIAPI_DIRECT /* Direct access via Type 1 accesses */ +#include +#include NAMED_CONFIG(ioapi.h) #include +#include LOCAL_NAMED_CONFIG(ioapi.h) #endif /* CONFIG_IOAPI_H */ diff --git a/src/config/qemu/ioapi.h b/src/config/qemu/ioapi.h new file mode 100644 index 000000000..e69de29bb diff --git a/src/config/rpi/ioapi.h b/src/config/rpi/ioapi.h new file mode 100644 index 000000000..e69de29bb diff --git a/src/config/usb.h b/src/config/usb.h index d2519d877..4252ec229 100644 --- a/src/config/usb.h +++ b/src/config/usb.h @@ -25,6 +25,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); * */ //#undef USB_KEYBOARD /* USB keyboards */ +//#undef USB_BLOCK /* USB block devices */ /* * USB external interfaces diff --git a/src/config/vbox/ioapi.h b/src/config/vbox/ioapi.h new file mode 100644 index 000000000..e69de29bb diff --git a/src/core/acpi.c b/src/core/acpi.c index e6912afa2..52eb63a04 100644 --- a/src/core/acpi.c +++ b/src/core/acpi.c @@ -35,6 +35,9 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); * */ +/** Colour for debug messages */ +#define colour FADT_SIGNATURE + /****************************************************************************** * * Utility functions @@ -80,13 +83,13 @@ void acpi_fix_checksum ( struct acpi_header *acpi ) { } /** - * Locate ACPI table + * Locate ACPI table via RSDT * * @v signature Requested table signature * @v index Requested index of table with this signature * @ret table Table, or UNULL if not found */ -userptr_t acpi_find ( uint32_t signature, unsigned int index ) { +userptr_t acpi_find_via_rsdt ( uint32_t signature, unsigned int index ) { struct acpi_header acpi; struct acpi_rsdt *rsdtab; typeof ( rsdtab->entry[0] ) entry; @@ -106,17 +109,17 @@ userptr_t acpi_find ( uint32_t signature, unsigned int index ) { /* Read RSDT header */ copy_from_user ( &acpi, rsdt, 0, sizeof ( acpi ) ); if ( acpi.signature != cpu_to_le32 ( 
RSDT_SIGNATURE ) ) { - DBGC ( rsdt, "RSDT %#08lx has invalid signature:\n", + DBGC ( colour, "RSDT %#08lx has invalid signature:\n", user_to_phys ( rsdt, 0 ) ); - DBGC_HDA ( rsdt, user_to_phys ( rsdt, 0 ), &acpi, + DBGC_HDA ( colour, user_to_phys ( rsdt, 0 ), &acpi, sizeof ( acpi ) ); return UNULL; } len = le32_to_cpu ( acpi.length ); if ( len < sizeof ( rsdtab->acpi ) ) { - DBGC ( rsdt, "RSDT %#08lx has invalid length:\n", + DBGC ( colour, "RSDT %#08lx has invalid length:\n", user_to_phys ( rsdt, 0 ) ); - DBGC_HDA ( rsdt, user_to_phys ( rsdt, 0 ), &acpi, + DBGC_HDA ( colour, user_to_phys ( rsdt, 0 ), &acpi, sizeof ( acpi ) ); return UNULL; } @@ -147,20 +150,20 @@ userptr_t acpi_find ( uint32_t signature, unsigned int index ) { /* Check table integrity */ if ( acpi_checksum ( table ) != 0 ) { - DBGC ( rsdt, "RSDT %#08lx found %s with bad checksum " - "at %08lx\n", user_to_phys ( rsdt, 0 ), + DBGC ( colour, "RSDT %#08lx found %s with bad " + "checksum at %08lx\n", user_to_phys ( rsdt, 0 ), acpi_name ( signature ), user_to_phys ( table, 0 ) ); break; } - DBGC ( rsdt, "RSDT %#08lx found %s at %08lx\n", + DBGC ( colour, "RSDT %#08lx found %s at %08lx\n", user_to_phys ( rsdt, 0 ), acpi_name ( signature ), user_to_phys ( table, 0 ) ); return table; } - DBGC ( rsdt, "RSDT %#08lx could not find %s\n", + DBGC ( colour, "RSDT %#08lx could not find %s\n", user_to_phys ( rsdt, 0 ), acpi_name ( signature ) ); return UNULL; } @@ -256,20 +259,12 @@ static int acpi_sx_zsdt ( userptr_t zsdt, uint32_t signature ) { */ int acpi_sx ( uint32_t signature ) { struct acpi_fadt fadtab; - userptr_t rsdt; userptr_t fadt; userptr_t dsdt; userptr_t ssdt; unsigned int i; int sx; - /* Locate RSDT */ - rsdt = acpi_find_rsdt(); - if ( ! rsdt ) { - DBG ( "RSDT not found\n" ); - return -ENOENT; - } - /* Try DSDT first */ fadt = acpi_find ( FADT_SIGNATURE, 0 ); if ( fadt ) { @@ -288,8 +283,8 @@ int acpi_sx ( uint32_t signature ) { return sx; } - DBGC ( rsdt, "RSDT %#08lx could not find \\_Sx \"%s\"\n", - user_to_phys ( rsdt, 0 ), acpi_name ( signature ) ); + DBGC ( colour, "ACPI could not find \\_Sx \"%s\"\n", + acpi_name ( signature ) ); return -ENOENT; } diff --git a/src/core/archive.c b/src/core/archive.c new file mode 100644 index 000000000..bb62c7e47 --- /dev/null +++ b/src/core/archive.c @@ -0,0 +1,138 @@ +/* + * Copyright (C) 2021 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
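/* Usage sketch for the acpi_find() call pattern exercised in the
 * acpi.c hunk above: the ( signature, index ) pair lets a caller
 * enumerate every table carrying a given signature, stopping at
 * UNULL.  The function name below is hypothetical and assumes
 * ipxe/acpi.h is included. */
static void demo_walk_ssdts ( void ) {
        userptr_t ssdt;
        unsigned int i;

        for ( i = 0 ; ( ssdt = acpi_find ( SSDT_SIGNATURE, i ) ) != UNULL ;
              i++ ) {
                /* ... inspect the i-th SSDT here ... */
        }
}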
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** @file + * + * Archive images + * + */ + +/** + * Extract archive image + * + * @v image Image + * @v name Extracted image name + * @v extracted Extracted image to fill in + * @ret rc Return status code + */ +int image_extract ( struct image *image, const char *name, + struct image **extracted ) { + char *dot; + int rc; + + /* Check that this image can be used to extract an archive image */ + if ( ! ( image->type && image->type->extract ) ) { + rc = -ENOTSUP; + goto err_unsupported; + } + + /* Allocate new image */ + *extracted = alloc_image ( image->uri ); + if ( ! *extracted ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Set image name */ + if ( ( rc = image_set_name ( *extracted, + ( name ? name : image->name ) ) ) != 0 ) { + goto err_set_name; + } + + /* Strip any archive or compression suffix from implicit name */ + if ( ( ! name ) && ( (*extracted)->name ) && + ( ( dot = strrchr ( (*extracted)->name, '.' ) ) != NULL ) ) { + *dot = '\0'; + } + + /* Try extracting archive image */ + if ( ( rc = image->type->extract ( image, *extracted ) ) != 0 ) { + DBGC ( image, "IMAGE %s could not extract image: %s\n", + image->name, strerror ( rc ) ); + goto err_extract; + } + + /* Register image */ + if ( ( rc = register_image ( *extracted ) ) != 0 ) + goto err_register; + + /* Propagate trust flag */ + if ( image->flags & IMAGE_TRUSTED ) + image_trust ( *extracted ); + + /* Drop local reference to image */ + image_put ( *extracted ); + + return 0; + + unregister_image ( *extracted ); + err_register: + err_extract: + err_set_name: + image_put ( *extracted ); + err_alloc: + err_unsupported: + return rc; +} + +/** + * Extract and execute image + * + * @v image Image + * @ret rc Return status code + */ +int image_extract_exec ( struct image *image ) { + struct image *extracted; + int rc; + + /* Extract image */ + if ( ( rc = image_extract ( image, NULL, &extracted ) ) != 0 ) + goto err_extract; + + /* Set image command line */ + if ( ( rc = image_set_cmdline ( extracted, image->cmdline ) ) != 0 ) + goto err_set_cmdline; + + /* Set auto-unregister flag */ + extracted->flags |= IMAGE_AUTO_UNREGISTER; + + /* Tail-recurse into extracted image */ + return image_exec ( extracted ); + + err_set_cmdline: + unregister_image ( extracted ); + err_extract: + return rc; +} + +/* Drag in objects via image_extract() */ +REQUIRING_SYMBOL ( image_extract ); + +/* Drag in archive image formats */ +REQUIRE_OBJECT ( config_archive ); diff --git a/src/core/base64.c b/src/core/base64.c index e452f7d41..ec11be261 100644 --- a/src/core/base64.c +++ b/src/core/base64.c @@ -36,7 +36,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); * */ -static const char base64[64] = +static const char base64[ 64 + 1 /* NUL */ ] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; /** diff --git a/src/core/blocktrans.c b/src/core/blocktrans.c index 3f32f9cf8..f9dcb95d2 100644 --- a/src/core/blocktrans.c +++ b/src/core/blocktrans.c @@ -242,9 +242,7 @@ int block_translate ( struct interface *block, userptr_t buffer, size_t size ) { } /* Attach to interfaces, mortalise self, and return */ - assert ( block->dest != &null_intf ); - intf_plug_plug ( &blktrans->xfer, block->dest ); - intf_plug_plug ( &blktrans->block, block ); + intf_insert ( block, &blktrans->block, &blktrans->xfer ); ref_put ( &blktrans->refcnt ); DBGC2 ( blktrans, "BLKTRANS %p created", blktrans ); diff --git a/src/core/cachedhcp.c b/src/core/cachedhcp.c new file mode 100644 index 
000000000..2fa9b0c73 --- /dev/null +++ b/src/core/cachedhcp.c @@ -0,0 +1,243 @@ +/* + * Copyright (C) 2013 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * Cached DHCP packet + * + */ + +/** A cached DHCP packet */ +struct cached_dhcp_packet { + /** Settings block name */ + const char *name; + /** DHCP packet (if any) */ + struct dhcp_packet *dhcppkt; +}; + +/** Cached DHCPACK */ +struct cached_dhcp_packet cached_dhcpack = { + .name = DHCP_SETTINGS_NAME, +}; + +/** Cached ProxyDHCPOFFER */ +struct cached_dhcp_packet cached_proxydhcp = { + .name = PROXYDHCP_SETTINGS_NAME, +}; + +/** Cached PXEBSACK */ +struct cached_dhcp_packet cached_pxebs = { + .name = PXEBS_SETTINGS_NAME, +}; + +/** List of cached DHCP packets */ +static struct cached_dhcp_packet *cached_packets[] = { + &cached_dhcpack, + &cached_proxydhcp, + &cached_pxebs, +}; + +/** Colour for debug messages */ +#define colour &cached_dhcpack + +/** + * Free cached DHCP packet + * + * @v cache Cached DHCP packet + */ +static void cachedhcp_free ( struct cached_dhcp_packet *cache ) { + + dhcppkt_put ( cache->dhcppkt ); + cache->dhcppkt = NULL; +} + +/** + * Apply cached DHCP packet settings + * + * @v cache Cached DHCP packet + * @v netdev Network device, or NULL + * @ret rc Return status code + */ +static int cachedhcp_apply ( struct cached_dhcp_packet *cache, + struct net_device *netdev ) { + struct settings *settings; + int rc; + + /* Do nothing if cache is empty */ + if ( ! cache->dhcppkt ) + return 0; + + /* Do nothing unless cached packet's MAC address matches this + * network device, if specified. + */ + if ( netdev ) { + if ( memcmp ( netdev->ll_addr, cache->dhcppkt->dhcphdr->chaddr, + netdev->ll_protocol->ll_addr_len ) != 0 ) { + DBGC ( colour, "CACHEDHCP %s does not match %s\n", + cache->name, netdev->name ); + return 0; + } + DBGC ( colour, "CACHEDHCP %s is for %s\n", + cache->name, netdev->name ); + } + + /* Select appropriate parent settings block */ + settings = ( netdev ? 
netdev_settings ( netdev ) : NULL ); + + /* Register settings */ + if ( ( rc = register_settings ( &cache->dhcppkt->settings, settings, + cache->name ) ) != 0 ) { + DBGC ( colour, "CACHEDHCP %s could not register settings: %s\n", + cache->name, strerror ( rc ) ); + return rc; + } + + /* Free cached DHCP packet */ + cachedhcp_free ( cache ); + + return 0; +} + +/** + * Record cached DHCP packet + * + * @v cache Cached DHCP packet + * @v data DHCPACK packet buffer + * @v max_len Maximum possible length + * @ret rc Return status code + */ +int cachedhcp_record ( struct cached_dhcp_packet *cache, userptr_t data, + size_t max_len ) { + struct dhcp_packet *dhcppkt; + struct dhcp_packet *tmp; + struct dhcphdr *dhcphdr; + unsigned int i; + size_t len; + + /* Free any existing cached packet */ + cachedhcp_free ( cache ); + + /* Allocate and populate DHCP packet */ + dhcppkt = zalloc ( sizeof ( *dhcppkt ) + max_len ); + if ( ! dhcppkt ) { + DBGC ( colour, "CACHEDHCP %s could not allocate copy\n", + cache->name ); + return -ENOMEM; + } + dhcphdr = ( ( ( void * ) dhcppkt ) + sizeof ( *dhcppkt ) ); + copy_from_user ( dhcphdr, data, 0, max_len ); + dhcppkt_init ( dhcppkt, dhcphdr, max_len ); + + /* Shrink packet to required length. If reallocation fails, + * just continue to use the original packet and waste the + * unused space. + */ + len = dhcppkt_len ( dhcppkt ); + assert ( len <= max_len ); + tmp = realloc ( dhcppkt, ( sizeof ( *dhcppkt ) + len ) ); + if ( tmp ) + dhcppkt = tmp; + + /* Reinitialise packet at new address */ + dhcphdr = ( ( ( void * ) dhcppkt ) + sizeof ( *dhcppkt ) ); + dhcppkt_init ( dhcppkt, dhcphdr, len ); + + /* Discard duplicate packets, since some PXE stacks (including + * iPXE itself) will report the DHCPACK packet as the PXEBSACK + * if no separate PXEBSACK exists. 
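/* Standalone illustration of the shrink-with-fallback pattern used by
 * cachedhcp_record() in this hunk: try to release the unused tail of
 * an allocation, but keep the original buffer if realloc() fails.
 * The function name is hypothetical. */
#include <stdlib.h>

static void * demo_shrink ( void *buf, size_t used ) {
        void *smaller = realloc ( buf, used );

        /* On failure the original allocation remains valid; continue
         * to use it and simply waste the unused space. */
        return ( smaller ? smaller : buf );
}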
+ */ + for ( i = 0 ; i < ( sizeof ( cached_packets ) / + sizeof ( cached_packets[0] ) ) ; i++ ) { + tmp = cached_packets[i]->dhcppkt; + if ( tmp && ( dhcppkt_len ( tmp ) == len ) && + ( memcmp ( tmp->dhcphdr, dhcppkt->dhcphdr, len ) == 0 ) ) { + DBGC ( colour, "CACHEDHCP %s duplicates %s\n", + cache->name, cached_packets[i]->name ); + dhcppkt_put ( dhcppkt ); + return -EEXIST; + } + } + + /* Store as cached packet */ + DBGC ( colour, "CACHEDHCP %s at %#08lx+%#zx/%#zx\n", cache->name, + user_to_phys ( data, 0 ), len, max_len ); + cache->dhcppkt = dhcppkt; + + return 0; +} + +/** + * Cached DHCPACK startup function + * + */ +static void cachedhcp_startup ( void ) { + + /* Apply cached ProxyDHCPOFFER, if any */ + cachedhcp_apply ( &cached_proxydhcp, NULL ); + + /* Apply cached PXEBSACK, if any */ + cachedhcp_apply ( &cached_pxebs, NULL ); + + /* Free any remaining cached packets */ + if ( cached_dhcpack.dhcppkt ) { + DBGC ( colour, "CACHEDHCP %s unclaimed\n", + cached_dhcpack.name ); + } + cachedhcp_free ( &cached_dhcpack ); + cachedhcp_free ( &cached_proxydhcp ); + cachedhcp_free ( &cached_pxebs ); +} + +/** Cached DHCPACK startup function */ +struct startup_fn cachedhcp_startup_fn __startup_fn ( STARTUP_LATE ) = { + .name = "cachedhcp", + .startup = cachedhcp_startup, +}; + +/** + * Apply cached DHCPACK to network device, if applicable + * + * @v netdev Network device + * @ret rc Return status code + */ +static int cachedhcp_probe ( struct net_device *netdev ) { + + /* Apply cached DHCPACK to network device, if applicable */ + return cachedhcp_apply ( &cached_dhcpack, netdev ); +} + +/** Cached DHCP packet network device driver */ +struct net_driver cachedhcp_driver __net_driver = { + .name = "cachedhcp", + .probe = cachedhcp_probe, +}; diff --git a/src/core/console.c b/src/core/console.c index 7fd00036f..2b90809bf 100644 --- a/src/core/console.c +++ b/src/core/console.c @@ -20,11 +20,12 @@ unsigned int console_height = CONSOLE_DEFAULT_HEIGHT; * Write a single character to each console device * * @v character Character to be written + * @ret character Character written * * The character is written out to all enabled console devices, using * each device's console_driver::putchar() method. */ -void putchar ( int character ) { +int putchar ( int character ) { struct console_driver *console; /* Automatic LF -> CR,LF translation */ @@ -37,6 +38,8 @@ void putchar ( int character ) { console->putchar ) console->putchar ( character ); } + + return character; } /** diff --git a/src/core/cpio.c b/src/core/cpio.c index 080c72daf..27aee7581 100644 --- a/src/core/cpio.c +++ b/src/core/cpio.c @@ -30,6 +30,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); */ #include +#include #include #include @@ -45,3 +46,87 @@ void cpio_set_field ( char *field, unsigned long value ) { snprintf ( buf, sizeof ( buf ), "%08lx", value ); memcpy ( field, buf, 8 ); } + +/** + * Get CPIO image filename + * + * @v image Image + * @ret len CPIO filename length (0 for no filename) + */ +size_t cpio_name_len ( struct image *image ) { + const char *name = cpio_name ( image ); + char *sep; + size_t len; + + /* Check for existence of CPIO filename */ + if ( ! name ) + return 0; + + /* Locate separator (if any) */ + sep = strchr ( name, ' ' ); + len = ( sep ? 
( ( size_t ) ( sep - name ) ) : strlen ( name ) ); + + return len; +} + +/** + * Parse CPIO image parameters + * + * @v image Image + * @v cpio CPIO header to fill in + */ +static void cpio_parse_cmdline ( struct image *image, + struct cpio_header *cpio ) { + const char *cmdline; + char *arg; + char *end; + unsigned int mode; + + /* Skip image filename */ + cmdline = ( cpio_name ( image ) + cpio_name_len ( image ) ); + + /* Look for "mode=" */ + if ( ( arg = strstr ( cmdline, "mode=" ) ) ) { + arg += 5; + mode = strtoul ( arg, &end, 8 /* Octal for file mode */ ); + if ( *end && ( *end != ' ' ) ) { + DBGC ( image, "CPIO %p strange \"mode=\" " + "terminator '%c'\n", image, *end ); + } + cpio_set_field ( cpio->c_mode, ( 0100000 | mode ) ); + } +} + +/** + * Construct CPIO header for image, if applicable + * + * @v image Image + * @v cpio CPIO header to fill in + * @ret len Length of magic CPIO header (including filename) + */ +size_t cpio_header ( struct image *image, struct cpio_header *cpio ) { + size_t name_len; + size_t len; + + /* Get filename length */ + name_len = cpio_name_len ( image ); + + /* Images with no filename are assumed to already be CPIO archives */ + if ( ! name_len ) + return 0; + + /* Construct CPIO header */ + memset ( cpio, '0', sizeof ( *cpio ) ); + memcpy ( cpio->c_magic, CPIO_MAGIC, sizeof ( cpio->c_magic ) ); + cpio_set_field ( cpio->c_mode, 0100644 ); + cpio_set_field ( cpio->c_nlink, 1 ); + cpio_set_field ( cpio->c_filesize, image->len ); + cpio_set_field ( cpio->c_namesize, ( name_len + 1 /* NUL */ ) ); + cpio_parse_cmdline ( image, cpio ); + + /* Calculate total length */ + len = ( ( sizeof ( *cpio ) + name_len + 1 /* NUL */ + CPIO_ALIGN - 1 ) + & ~( CPIO_ALIGN - 1 ) ); + + return len; +} diff --git a/src/core/dma.c b/src/core/dma.c new file mode 100644 index 000000000..5d6868216 --- /dev/null +++ b/src/core/dma.c @@ -0,0 +1,179 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
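/* Worked example of the rounding performed by cpio_header() above
 * when padding the "newc" header plus filename to CPIO_ALIGN bytes.
 * The figures assume a 110-byte newc header, a hypothetical
 * 10-character filename and 4-byte alignment. */
#include <assert.h>
#include <stddef.h>

#define DEMO_ALIGN_UP( len, align ) \
        ( ( (len) + (align) - 1 ) & ~( ( size_t ) (align) - 1 ) )

int main ( void ) {
        /* 110 + 10 + 1 (NUL) = 121 bytes, rounded up to 124 */
        assert ( DEMO_ALIGN_UP ( 110 + 10 + 1, 4 ) == 124 );
        return 0;
}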
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** @file + * + * DMA mappings + * + */ + +/****************************************************************************** + * + * Flat address space DMA API + * + ****************************************************************************** + */ + +PROVIDE_DMAAPI_INLINE ( flat, dma_map ); +PROVIDE_DMAAPI_INLINE ( flat, dma_unmap ); +PROVIDE_DMAAPI_INLINE ( flat, dma_alloc ); +PROVIDE_DMAAPI_INLINE ( flat, dma_free ); +PROVIDE_DMAAPI_INLINE ( flat, dma_umalloc ); +PROVIDE_DMAAPI_INLINE ( flat, dma_ufree ); +PROVIDE_DMAAPI_INLINE ( flat, dma_set_mask ); +PROVIDE_DMAAPI_INLINE ( flat, dma_phys ); + +/****************************************************************************** + * + * Operations-based DMA API + * + ****************************************************************************** + */ + +/** + * Map buffer for DMA + * + * @v dma DMA device + * @v map DMA mapping to fill in + * @v addr Buffer address + * @v len Length of buffer + * @v flags Mapping flags + * @ret rc Return status code + */ +static int dma_op_map ( struct dma_device *dma, struct dma_mapping *map, + physaddr_t addr, size_t len, int flags ) { + struct dma_operations *op = dma->op; + + if ( ! op ) + return -ENODEV; + return op->map ( dma, map, addr, len, flags ); +} + +/** + * Unmap buffer + * + * @v map DMA mapping + */ +static void dma_op_unmap ( struct dma_mapping *map ) { + struct dma_device *dma = map->dma; + + assert ( dma != NULL ); + assert ( dma->op != NULL ); + dma->op->unmap ( dma, map ); +} + +/** + * Allocate and map DMA-coherent buffer + * + * @v dma DMA device + * @v map DMA mapping to fill in + * @v len Length of buffer + * @v align Physical alignment + * @ret addr Buffer address, or NULL on error + */ +static void * dma_op_alloc ( struct dma_device *dma, struct dma_mapping *map, + size_t len, size_t align ) { + struct dma_operations *op = dma->op; + + if ( ! op ) + return NULL; + return op->alloc ( dma, map, len, align ); +} + +/** + * Unmap and free DMA-coherent buffer + * + * @v map DMA mapping + * @v addr Buffer address + * @v len Length of buffer + */ +static void dma_op_free ( struct dma_mapping *map, void *addr, size_t len ) { + struct dma_device *dma = map->dma; + + assert ( dma != NULL ); + assert ( dma->op != NULL ); + dma->op->free ( dma, map, addr, len ); +} + +/** + * Allocate and map DMA-coherent buffer from external (user) memory + * + * @v dma DMA device + * @v map DMA mapping to fill in + * @v len Length of buffer + * @v align Physical alignment + * @ret addr Buffer address, or NULL on error + */ +static userptr_t dma_op_umalloc ( struct dma_device *dma, + struct dma_mapping *map, + size_t len, size_t align ) { + struct dma_operations *op = dma->op; + + if ( ! 
op ) + return UNULL; + return op->umalloc ( dma, map, len, align ); +} + +/** + * Unmap and free DMA-coherent buffer from external (user) memory + * + * @v map DMA mapping + * @v addr Buffer address + * @v len Length of buffer + */ +static void dma_op_ufree ( struct dma_mapping *map, userptr_t addr, + size_t len ) { + struct dma_device *dma = map->dma; + + assert ( dma != NULL ); + assert ( dma->op != NULL ); + dma->op->ufree ( dma, map, addr, len ); +} + +/** + * Set addressable space mask + * + * @v dma DMA device + * @v mask Addressable space mask + */ +static void dma_op_set_mask ( struct dma_device *dma, physaddr_t mask ) { + struct dma_operations *op = dma->op; + + if ( op ) + op->set_mask ( dma, mask ); +} + +PROVIDE_DMAAPI ( op, dma_map, dma_op_map ); +PROVIDE_DMAAPI ( op, dma_unmap, dma_op_unmap ); +PROVIDE_DMAAPI ( op, dma_alloc, dma_op_alloc ); +PROVIDE_DMAAPI ( op, dma_free, dma_op_free ); +PROVIDE_DMAAPI ( op, dma_umalloc, dma_op_umalloc ); +PROVIDE_DMAAPI ( op, dma_ufree, dma_op_ufree ); +PROVIDE_DMAAPI ( op, dma_set_mask, dma_op_set_mask ); +PROVIDE_DMAAPI_INLINE ( op, dma_phys ); diff --git a/src/core/image.c b/src/core/image.c index 078ce1bb9..ce8cf868b 100644 --- a/src/core/image.c +++ b/src/core/image.c @@ -175,6 +175,47 @@ int image_set_cmdline ( struct image *image, const char *cmdline ) { return 0; } +/** + * Set image length + * + * @v image Image + * @v len Length of image data + * @ret rc Return status code + */ +int image_set_len ( struct image *image, size_t len ) { + userptr_t new; + + /* (Re)allocate image data */ + new = urealloc ( image->data, len ); + if ( ! new ) + return -ENOMEM; + image->data = new; + image->len = len; + + return 0; +} + +/** + * Set image data + * + * @v image Image + * @v data Image data + * @v len Length of image data + * @ret rc Return status code + */ +int image_set_data ( struct image *image, userptr_t data, size_t len ) { + int rc; + + /* Set image length */ + if ( ( rc = image_set_len ( image, len ) ) != 0 ) + return rc; + + /* Copy in new image data */ + memcpy_user ( image->data, 0, data, 0, len ); + + return 0; +} + /** * Determine image type * @@ -481,3 +522,47 @@ int image_set_trust ( int require_trusted, int permanent ) { return 0; } + +/** + * Create registered image from block of memory + * + * @v name Name + * @v data Image data + * @v len Length + * @ret image Image, or NULL on error + */ +struct image * image_memory ( const char *name, userptr_t data, size_t len ) { + struct image *image; + int rc; + + /* Allocate image */ + image = alloc_image ( NULL ); + if ( ! image ) { + rc = -ENOMEM; + goto err_alloc_image; + } + + /* Set name */ + if ( ( rc = image_set_name ( image, name ) ) != 0 ) + goto err_set_name; + + /* Set data */ + if ( ( rc = image_set_data ( image, data, len ) ) != 0 ) + goto err_set_data; + + /* Register image */ + if ( ( rc = register_image ( image ) ) != 0 ) + goto err_register; + + /* Drop local reference to image */ + image_put ( image ); + + return image; + + err_register: + err_set_data: + err_set_name: + image_put ( image ); + err_alloc_image: + return NULL; +} diff --git a/src/core/interface.c b/src/core/interface.c index 402aa4541..34a4180a5 100644 --- a/src/core/interface.c +++ b/src/core/interface.c @@ -81,9 +81,14 @@ struct interface null_intf = INTF_INIT ( null_intf_desc ); * interface is updated to point to the new destination interface. 
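/* Usage sketch for image_memory() from the image.c hunk above: wrap
 * an in-memory buffer as a named, registered image.  The script
 * contents and name are hypothetical; virt_to_user() converts an
 * ordinary virtual address into a userptr_t (assumes ipxe/image.h,
 * ipxe/uaccess.h and errno.h are included). */
static int demo_register_script ( void ) {
        static const char script[] = "#!ipxe\necho hello\n";
        struct image *image;

        image = image_memory ( "demo.ipxe",
                               virt_to_user ( ( void * ) script ),
                               ( sizeof ( script ) - 1 /* NUL */ ) );
        return ( image ? 0 : -ENOMEM );
}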
*/ void intf_plug ( struct interface *intf, struct interface *dest ) { + + if ( intf == &null_intf ) + return; + DBGC ( INTF_COL ( intf ), "INTF " INTF_INTF_FMT " replug to " INTF_FMT "\n", INTF_INTF_DBG ( intf, intf->dest ), INTF_DBG ( dest ) ); + intf_get ( dest ); intf_put ( intf->dest ); intf->dest = dest; @@ -385,6 +390,23 @@ void intfs_restart ( int rc, ... ) { va_end ( intfs ); } +/** + * Insert a filter interface + * + * @v intf Object interface + * @v upper Upper end of filter + * @v lower Lower end of filter + */ +void intf_insert ( struct interface *intf, struct interface *upper, + struct interface *lower ) { + struct interface *dest = intf->dest; + + intf_get ( dest ); + intf_plug_plug ( intf, upper ); + intf_plug_plug ( lower, dest ); + intf_put ( dest ); +} + /** * Poke an object interface * diff --git a/src/core/iobuf.c b/src/core/iobuf.c index 0ee53e038..c9970bc76 100644 --- a/src/core/iobuf.c +++ b/src/core/iobuf.c @@ -88,8 +88,8 @@ struct io_buffer * alloc_iob_raw ( size_t len, size_t align, size_t offset ) { len += ( ( - len - offset ) & ( __alignof__ ( *iobuf ) - 1 ) ); /* Allocate memory for buffer plus descriptor */ - data = malloc_dma_offset ( len + sizeof ( *iobuf ), align, - offset ); + data = malloc_phys_offset ( len + sizeof ( *iobuf ), align, + offset ); if ( ! data ) return NULL; iobuf = ( data + len ); @@ -97,19 +97,20 @@ struct io_buffer * alloc_iob_raw ( size_t len, size_t align, size_t offset ) { } else { /* Allocate memory for buffer */ - data = malloc_dma_offset ( len, align, offset ); + data = malloc_phys_offset ( len, align, offset ); if ( ! data ) return NULL; /* Allocate memory for descriptor */ iobuf = malloc ( sizeof ( *iobuf ) ); if ( ! iobuf ) { - free_dma ( data, len ); + free_phys ( data, len ); return NULL; } } /* Populate descriptor */ + memset ( &iobuf->map, 0, sizeof ( iobuf->map ) ); iobuf->head = iobuf->data = iobuf->tail = data; iobuf->end = ( data + len ); @@ -153,22 +154,66 @@ void free_iob ( struct io_buffer *iobuf ) { assert ( iobuf->head <= iobuf->data ); assert ( iobuf->data <= iobuf->tail ); assert ( iobuf->tail <= iobuf->end ); + assert ( ! dma_mapped ( &iobuf->map ) ); /* Free buffer */ len = ( iobuf->end - iobuf->head ); if ( iobuf->end == iobuf ) { /* Descriptor is inline */ - free_dma ( iobuf->head, ( len + sizeof ( *iobuf ) ) ); + free_phys ( iobuf->head, ( len + sizeof ( *iobuf ) ) ); } else { /* Descriptor is detached */ - free_dma ( iobuf->head, len ); + free_phys ( iobuf->head, len ); free ( iobuf ); } } +/** + * Allocate and map I/O buffer for receive DMA + * + * @v len Length of I/O buffer + * @v dma DMA device + * @ret iobuf I/O buffer, or NULL on error + */ +struct io_buffer * alloc_rx_iob ( size_t len, struct dma_device *dma ) { + struct io_buffer *iobuf; + int rc; + + /* Allocate I/O buffer */ + iobuf = alloc_iob ( len ); + if ( ! 
iobuf ) + goto err_alloc; + + /* Map I/O buffer */ + if ( ( rc = iob_map_rx ( iobuf, dma ) ) != 0 ) + goto err_map; + + return iobuf; + + iob_unmap ( iobuf ); + err_map: + free_iob ( iobuf ); + err_alloc: + return NULL; +} + +/** + * Unmap and free I/O buffer for receive DMA + * + * @v iobuf I/O buffer + */ +void free_rx_iob ( struct io_buffer *iobuf ) { + + /* Unmap I/O buffer */ + iob_unmap ( iobuf ); + + /* Free I/O buffer */ + free_iob ( iobuf ); +} + /** * Ensure I/O buffer has sufficient headroom * diff --git a/src/core/malloc.c b/src/core/malloc.c index 0a7843a14..8499ab45a 100644 --- a/src/core/malloc.c +++ b/src/core/malloc.c @@ -596,8 +596,8 @@ void * malloc ( size_t size ) { * * @v ptr Memory allocated by malloc(), or NULL * - * Memory allocated with malloc_dma() cannot be freed with free(); it - * must be freed with free_dma() instead. + * Memory allocated with malloc_phys() cannot be freed with free(); it + * must be freed with free_phys() instead. * * If @c ptr is NULL, no action is taken. */ diff --git a/src/core/null_acpi.c b/src/core/null_acpi.c index 90c784855..acca37872 100644 --- a/src/core/null_acpi.c +++ b/src/core/null_acpi.c @@ -1,3 +1,3 @@ #include -PROVIDE_ACPI_INLINE ( null, acpi_find_rsdt ); +PROVIDE_ACPI_INLINE ( null, acpi_find ); diff --git a/src/core/open.c b/src/core/open.c index 9d665ffda..f9198c9d9 100644 --- a/src/core/open.c +++ b/src/core/open.c @@ -25,6 +25,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include +#include #include #include #include @@ -47,7 +48,7 @@ struct uri_opener * xfer_uri_opener ( const char *scheme ) { struct uri_opener *opener; for_each_table_entry ( opener, URI_OPENERS ) { - if ( strcmp ( scheme, opener->scheme ) == 0 ) + if ( strcasecmp ( scheme, opener->scheme ) == 0 ) return opener; } return NULL; @@ -147,10 +148,8 @@ int xfer_open_socket ( struct interface *intf, int semantics, socket_family_name ( peer->sa_family ) ); for_each_table_entry ( opener, SOCKET_OPENERS ) { - if ( ( opener->semantics == semantics ) && - ( opener->family == peer->sa_family ) ) { + if ( opener->semantics == semantics ) return opener->open ( intf, peer, local ); - } } DBGC ( INTF_COL ( intf ), "INTF " INTF_FMT " attempted to open " diff --git a/src/core/parseopt.c b/src/core/parseopt.c index 3ddf94f3d..007080088 100644 --- a/src/core/parseopt.c +++ b/src/core/parseopt.c @@ -93,7 +93,7 @@ int parse_integer ( char *text, unsigned int *value ) { /* Parse integer */ *value = strtoul ( text, &endp, 0 ); - if ( *endp ) { + if ( *endp || ( ! *text ) ) { printf ( "\"%s\": invalid integer value\n", text ); return -EINVAL_INTEGER; } diff --git a/src/core/settings.c b/src/core/settings.c index 3e5d416e7..430cdc84b 100644 --- a/src/core/settings.c +++ b/src/core/settings.c @@ -370,12 +370,14 @@ const char * settings_name ( struct settings *settings ) { static struct settings * parse_settings_name ( const char *name, get_child_settings_t get_child ) { struct settings *settings = &settings_root; - char name_copy[ strlen ( name ) + 1 ]; + char *name_copy; char *subname; char *remainder; /* Create modifiable copy of name */ - memcpy ( name_copy, name, sizeof ( name_copy ) ); + name_copy = strdup ( name ); + if ( ! 
name_copy ) + return NULL; remainder = name_copy; /* Parse each name component in turn */ @@ -389,6 +391,9 @@ parse_settings_name ( const char *name, get_child_settings_t get_child ) { break; } + /* Free modifiable copy of name */ + free ( name_copy ); + return settings; } diff --git a/src/core/string.c b/src/core/string.c index 5bd9dae8b..9a1b9b72a 100644 --- a/src/core/string.c +++ b/src/core/string.c @@ -27,6 +27,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include #include /** @file @@ -52,7 +53,7 @@ void * generic_memset ( void *dest, int character, size_t len ) { } /** - * Copy memory region + * Copy memory region (forwards) * * @v dest Destination region * @v src Source region @@ -68,6 +69,23 @@ void * generic_memcpy ( void *dest, const void *src, size_t len ) { return dest; } +/** + * Copy memory region (backwards) + * + * @v dest Destination region + * @v src Source region + * @v len Length + * @ret dest Destination region + */ +void * generic_memcpy_reverse ( void *dest, const void *src, size_t len ) { + const uint8_t *src_bytes = ( src + len ); + uint8_t *dest_bytes = ( dest + len ); + + while ( len-- ) + *(--dest_bytes) = *(--src_bytes); + return dest; +} + /** * Copy (possibly overlapping) memory region * @@ -77,14 +95,12 @@ void * generic_memcpy ( void *dest, const void *src, size_t len ) { * @ret dest Destination region */ void * generic_memmove ( void *dest, const void *src, size_t len ) { - const uint8_t *src_bytes = ( src + len ); - uint8_t *dest_bytes = ( dest + len ); - if ( dest < src ) + if ( dest < src ) { return generic_memcpy ( dest, src, len ); - while ( len-- ) - *(--dest_bytes) = *(--src_bytes); - return dest; + } else { + return generic_memcpy_reverse ( dest, src, len ); + } } /** @@ -101,7 +117,7 @@ int memcmp ( const void *first, const void *second, size_t len ) { int diff; while ( len-- ) { - diff = ( *(second_bytes++) - *(first_bytes++) ); + diff = ( *(first_bytes++) - *(second_bytes++) ); if ( diff ) return diff; } @@ -190,11 +206,24 @@ int strncmp ( const char *first, const char *second, size_t max ) { * @ret diff Difference */ int strcasecmp ( const char *first, const char *second ) { + + return strncasecmp ( first, second, ~( ( size_t ) 0 ) ); +} + +/** + * Compare case-insensitive strings + * + * @v first First string + * @v second Second string + * @v max Maximum length to compare + * @ret diff Difference + */ +int strncasecmp ( const char *first, const char *second, size_t max ) { const uint8_t *first_bytes = ( ( const uint8_t * ) first ); const uint8_t *second_bytes = ( ( const uint8_t * ) second ); int diff; - for ( ; ; first_bytes++, second_bytes++ ) { + for ( ; max-- ; first_bytes++, second_bytes++ ) { diff = ( toupper ( *first_bytes ) - toupper ( *second_bytes ) ); if ( diff ) @@ -202,6 +231,7 @@ int strcasecmp ( const char *first, const char *second ) { if ( ! 
*first_bytes ) return 0; } + return 0; } /** diff --git a/src/core/uri.c b/src/core/uri.c index 73ad2b227..e9e512ab4 100644 --- a/src/core/uri.c +++ b/src/core/uri.c @@ -413,8 +413,8 @@ struct uri * parse_uri ( const char *uri_string ) { } /* Split host into host[:port] */ - if ( ( uri->host[ strlen ( uri->host ) - 1 ] != ']' ) && - ( tmp = strrchr ( uri->host, ':' ) ) ) { + if ( ( tmp = strrchr ( uri->host, ':' ) ) && + ( uri->host[ strlen ( uri->host ) - 1 ] != ']' ) ) { *(tmp++) = '\0'; uri->port = tmp; } diff --git a/src/crypto/certstore.c b/src/crypto/certstore.c index cdf6fb4dd..2676c7e1e 100644 --- a/src/crypto/certstore.c +++ b/src/crypto/certstore.c @@ -116,13 +116,13 @@ struct x509_certificate * certstore_find ( struct asn1_cursor *raw ) { * @v key Private key * @ret cert X.509 certificate, or NULL if not found */ -struct x509_certificate * certstore_find_key ( struct asn1_cursor *key ) { +struct x509_certificate * certstore_find_key ( struct private_key *key ) { struct x509_certificate *cert; /* Search for certificate within store */ list_for_each_entry ( cert, &certstore.links, store.list ) { if ( pubkey_match ( cert->signature_algorithm->pubkey, - key->data, key->len, + key->builder.data, key->builder.len, cert->subject.public_key.raw.data, cert->subject.public_key.raw.len ) == 0 ) return certstore_found ( cert ); diff --git a/src/crypto/cms.c b/src/crypto/cms.c index bc2148e8a..9511cec8a 100644 --- a/src/crypto/cms.c +++ b/src/crypto/cms.c @@ -76,7 +76,7 @@ static uint8_t oid_signeddata[] = { ASN1_OID_SIGNEDDATA }; /** "pkcs7-signedData" object identifier cursor */ static struct asn1_cursor oid_signeddata_cursor = - ASN1_OID_CURSOR ( oid_signeddata ); + ASN1_CURSOR ( oid_signeddata ); /** * Parse CMS signature content type diff --git a/src/crypto/deflate.c b/src/crypto/deflate.c index e1c87d5fe..7ad39ec1b 100644 --- a/src/crypto/deflate.c +++ b/src/crypto/deflate.c @@ -56,7 +56,7 @@ static uint8_t deflate_reverse[256]; * does not fit the pattern (it represents a length of 258; following * the pattern from the earlier codes would give a length of 259), and * has no extra bits. Codes 286-287 are invalid, but can occur. We - * treat any code greater than 284 as meaning "length 285, no extra + * treat any code greater than 284 as meaning "length 258, no extra * bits". 
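/* Worked example of the memcmp() sign convention restored in the
 * string.c hunk above: the result must take its sign from the first
 * differing byte of the first argument minus the second.  Standalone
 * check. */
#include <assert.h>
#include <string.h>

int main ( void ) {
        assert ( memcmp ( "abc", "abd", 3 ) < 0 );   /* 'c' < 'd' */
        assert ( memcmp ( "abd", "abc", 3 ) > 0 );
        assert ( memcmp ( "abc", "abc", 3 ) == 0 );
        return 0;
}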
*/ static uint8_t deflate_litlen_base[28]; diff --git a/src/crypto/md4.c b/src/crypto/md4.c index f4a8d78df..ca5dcc21b 100644 --- a/src/crypto/md4.c +++ b/src/crypto/md4.c @@ -35,7 +35,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include #include /** MD4 variables */ @@ -268,13 +267,3 @@ struct digest_algorithm md4_algorithm = { .update = md4_update, .final = md4_final, }; - -/** "md4" object identifier */ -static uint8_t oid_md4[] = { ASN1_OID_MD4 }; - -/** "md4" OID-identified algorithm */ -struct asn1_algorithm oid_md4_algorithm __asn1_algorithm = { - .name = "md4", - .digest = &md4_algorithm, - .oid = ASN1_OID_CURSOR ( oid_md4 ), -}; diff --git a/src/crypto/md5.c b/src/crypto/md5.c index 185a61f35..bee382e95 100644 --- a/src/crypto/md5.c +++ b/src/crypto/md5.c @@ -35,7 +35,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include #include /** MD5 variables */ @@ -293,13 +292,3 @@ struct digest_algorithm md5_algorithm = { .update = md5_update, .final = md5_final, }; - -/** "md5" object identifier */ -static uint8_t oid_md5[] = { ASN1_OID_MD5 }; - -/** "md5" OID-identified algorithm */ -struct asn1_algorithm oid_md5_algorithm __asn1_algorithm = { - .name = "md5", - .digest = &md5_algorithm, - .oid = ASN1_OID_CURSOR ( oid_md5 ), -}; diff --git a/src/crypto/mishmash/oid_md4.c b/src/crypto/mishmash/oid_md4.c new file mode 100644 index 000000000..d42f2df19 --- /dev/null +++ b/src/crypto/mishmash/oid_md4.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "md4" object identifier */ +static uint8_t oid_md4[] = { ASN1_OID_MD4 }; + +/** "md4" OID-identified algorithm */ +struct asn1_algorithm oid_md4_algorithm __asn1_algorithm = { + .name = "md4", + .digest = &md4_algorithm, + .oid = ASN1_CURSOR ( oid_md4 ), +}; diff --git a/src/crypto/mishmash/oid_md5.c b/src/crypto/mishmash/oid_md5.c new file mode 100644 index 000000000..f56dd8b8d --- /dev/null +++ b/src/crypto/mishmash/oid_md5.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "md5" object identifier */ +static uint8_t oid_md5[] = { ASN1_OID_MD5 }; + +/** "md5" OID-identified algorithm */ +struct asn1_algorithm oid_md5_algorithm __asn1_algorithm = { + .name = "md5", + .digest = &md5_algorithm, + .oid = ASN1_CURSOR ( oid_md5 ), +}; diff --git a/src/crypto/mishmash/oid_rsa.c b/src/crypto/mishmash/oid_rsa.c new file mode 100644 index 000000000..582022628 --- /dev/null +++ b/src/crypto/mishmash/oid_rsa.c @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "rsaEncryption" object identifier */ +static uint8_t oid_rsa_encryption[] = { ASN1_OID_RSAENCRYPTION }; + +/** "rsaEncryption" OID-identified algorithm */ +struct asn1_algorithm rsa_encryption_algorithm __asn1_algorithm = { + .name = "rsaEncryption", + .pubkey = &rsa_algorithm, + .digest = NULL, + .oid = ASN1_CURSOR ( oid_rsa_encryption ), +}; diff --git a/src/crypto/mishmash/oid_sha1.c b/src/crypto/mishmash/oid_sha1.c new file mode 100644 index 000000000..5dae6d27c --- /dev/null +++ b/src/crypto/mishmash/oid_sha1.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "sha1" object identifier */ +static uint8_t oid_sha1[] = { ASN1_OID_SHA1 }; + +/** "sha1" OID-identified algorithm */ +struct asn1_algorithm oid_sha1_algorithm __asn1_algorithm = { + .name = "sha1", + .digest = &sha1_algorithm, + .oid = ASN1_CURSOR ( oid_sha1 ), +}; diff --git a/src/crypto/mishmash/oid_sha224.c b/src/crypto/mishmash/oid_sha224.c new file mode 100644 index 000000000..ee7ed22e4 --- /dev/null +++ b/src/crypto/mishmash/oid_sha224.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "sha224" object identifier */ +static uint8_t oid_sha224[] = { ASN1_OID_SHA224 }; + +/** "sha224" OID-identified algorithm */ +struct asn1_algorithm oid_sha224_algorithm __asn1_algorithm = { + .name = "sha224", + .digest = &sha224_algorithm, + .oid = ASN1_CURSOR ( oid_sha224 ), +}; diff --git a/src/crypto/mishmash/oid_sha256.c b/src/crypto/mishmash/oid_sha256.c new file mode 100644 index 000000000..963fddb63 --- /dev/null +++ b/src/crypto/mishmash/oid_sha256.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
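/* Sketch of why each oid_*.c file above needs only its
 * __asn1_algorithm annotation: the annotation places the structure in
 * a linker table, so looking up an algorithm by OID is just a table
 * walk.  iPXE already provides such a lookup; the helper name below
 * is hypothetical and the table name is assumed. */
static struct asn1_algorithm *
demo_find_algorithm ( const struct asn1_cursor *oid ) {
        struct asn1_algorithm *algorithm;

        for_each_table_entry ( algorithm, ASN1_ALGORITHMS ) {
                if ( asn1_compare ( &algorithm->oid, oid ) == 0 )
                        return algorithm;
        }
        return NULL;
}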
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "sha256" object identifier */ +static uint8_t oid_sha256[] = { ASN1_OID_SHA256 }; + +/** "sha256" OID-identified algorithm */ +struct asn1_algorithm oid_sha256_algorithm __asn1_algorithm = { + .name = "sha256", + .digest = &sha256_algorithm, + .oid = ASN1_CURSOR ( oid_sha256 ), +}; diff --git a/src/crypto/mishmash/oid_sha384.c b/src/crypto/mishmash/oid_sha384.c new file mode 100644 index 000000000..81ff48bbf --- /dev/null +++ b/src/crypto/mishmash/oid_sha384.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "sha384" object identifier */ +static uint8_t oid_sha384[] = { ASN1_OID_SHA384 }; + +/** "sha384" OID-identified algorithm */ +struct asn1_algorithm oid_sha384_algorithm __asn1_algorithm = { + .name = "sha384", + .digest = &sha384_algorithm, + .oid = ASN1_CURSOR ( oid_sha384 ), +}; diff --git a/src/crypto/mishmash/oid_sha512.c b/src/crypto/mishmash/oid_sha512.c new file mode 100644 index 000000000..78bae48b4 --- /dev/null +++ b/src/crypto/mishmash/oid_sha512.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "sha512" object identifier */ +static uint8_t oid_sha512[] = { ASN1_OID_SHA512 }; + +/** "sha512" OID-identified algorithm */ +struct asn1_algorithm oid_sha512_algorithm __asn1_algorithm = { + .name = "sha512", + .digest = &sha512_algorithm, + .oid = ASN1_CURSOR ( oid_sha512 ), +}; diff --git a/src/crypto/mishmash/oid_sha512_224.c b/src/crypto/mishmash/oid_sha512_224.c new file mode 100644 index 000000000..6f61f9cac --- /dev/null +++ b/src/crypto/mishmash/oid_sha512_224.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "sha512_224" object identifier */ +static uint8_t oid_sha512_224[] = { ASN1_OID_SHA512_224 }; + +/** "sha512_224" OID-identified algorithm */ +struct asn1_algorithm oid_sha512_224_algorithm __asn1_algorithm = { + .name = "sha512/224", + .digest = &sha512_224_algorithm, + .oid = ASN1_CURSOR ( oid_sha512_224 ), +}; diff --git a/src/crypto/mishmash/oid_sha512_256.c b/src/crypto/mishmash/oid_sha512_256.c new file mode 100644 index 000000000..bce4762e4 --- /dev/null +++ b/src/crypto/mishmash/oid_sha512_256.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** "sha512_256" object identifier */ +static uint8_t oid_sha512_256[] = { ASN1_OID_SHA512_256 }; + +/** "sha512_256" OID-identified algorithm */ +struct asn1_algorithm oid_sha512_256_algorithm __asn1_algorithm = { + .name = "sha512/256", + .digest = &sha512_256_algorithm, + .oid = ASN1_CURSOR ( oid_sha512_256 ), +}; diff --git a/src/crypto/mishmash/rsa_md5.c b/src/crypto/mishmash/rsa_md5.c index ac828ac11..051afe264 100644 --- a/src/crypto/mishmash/rsa_md5.c +++ b/src/crypto/mishmash/rsa_md5.c @@ -36,7 +36,7 @@ struct asn1_algorithm md5_with_rsa_encryption_algorithm __asn1_algorithm = { .name = "md5WithRSAEncryption", .pubkey = &rsa_algorithm, .digest = &md5_algorithm, - .oid = ASN1_OID_CURSOR ( oid_md5_with_rsa_encryption ), + .oid = ASN1_CURSOR ( oid_md5_with_rsa_encryption ), }; /** MD5 digestInfo prefix */ diff --git a/src/crypto/mishmash/rsa_sha1.c b/src/crypto/mishmash/rsa_sha1.c index 39424bf2d..264f871f1 100644 --- a/src/crypto/mishmash/rsa_sha1.c +++ b/src/crypto/mishmash/rsa_sha1.c @@ -37,7 +37,7 @@ struct asn1_algorithm sha1_with_rsa_encryption_algorithm __asn1_algorithm = { .name = "sha1WithRSAEncryption", .pubkey = &rsa_algorithm, .digest = &sha1_algorithm, - .oid = ASN1_OID_CURSOR ( oid_sha1_with_rsa_encryption ), + .oid = ASN1_CURSOR ( oid_sha1_with_rsa_encryption ), }; /** SHA-1 digestInfo prefix */ diff --git a/src/crypto/mishmash/rsa_sha224.c b/src/crypto/mishmash/rsa_sha224.c index 5e8755aab..1465a033d 100644 --- a/src/crypto/mishmash/rsa_sha224.c +++ b/src/crypto/mishmash/rsa_sha224.c @@ -37,7 +37,7 @@ struct asn1_algorithm sha224_with_rsa_encryption_algorithm __asn1_algorithm = { .name = "sha224WithRSAEncryption", .pubkey = &rsa_algorithm, .digest = &sha224_algorithm, - .oid = ASN1_OID_CURSOR ( oid_sha224_with_rsa_encryption ), + .oid = ASN1_CURSOR ( oid_sha224_with_rsa_encryption ), }; /** SHA-224 digestInfo prefix */ diff --git a/src/crypto/mishmash/rsa_sha256.c b/src/crypto/mishmash/rsa_sha256.c index b44af5f19..7283c3e29 100644 --- a/src/crypto/mishmash/rsa_sha256.c +++ b/src/crypto/mishmash/rsa_sha256.c @@ -37,7 +37,7 @@ struct asn1_algorithm sha256_with_rsa_encryption_algorithm __asn1_algorithm = { .name = "sha256WithRSAEncryption", .pubkey = &rsa_algorithm, .digest = &sha256_algorithm, - .oid = ASN1_OID_CURSOR ( oid_sha256_with_rsa_encryption ), + .oid = ASN1_CURSOR ( oid_sha256_with_rsa_encryption ), }; /** SHA-256 digestInfo prefix */ diff --git a/src/crypto/mishmash/rsa_sha384.c b/src/crypto/mishmash/rsa_sha384.c index af22a2bf0..6f8c29b29 100644 --- a/src/crypto/mishmash/rsa_sha384.c +++ b/src/crypto/mishmash/rsa_sha384.c @@ -37,7 +37,7 @@ struct asn1_algorithm sha384_with_rsa_encryption_algorithm __asn1_algorithm = { .name = "sha384WithRSAEncryption", .pubkey = &rsa_algorithm, .digest = &sha384_algorithm, - .oid = ASN1_OID_CURSOR ( oid_sha384_with_rsa_encryption ), + .oid = ASN1_CURSOR ( oid_sha384_with_rsa_encryption ), }; /** SHA-384 digestInfo prefix */ diff --git a/src/crypto/mishmash/rsa_sha512.c b/src/crypto/mishmash/rsa_sha512.c index 29ee15493..bb4463a5a 100644 --- a/src/crypto/mishmash/rsa_sha512.c +++ b/src/crypto/mishmash/rsa_sha512.c @@ -37,7 +37,7 @@ struct asn1_algorithm sha512_with_rsa_encryption_algorithm __asn1_algorithm = { .name = "sha512WithRSAEncryption", .pubkey = &rsa_algorithm, .digest = &sha512_algorithm, - .oid = ASN1_OID_CURSOR ( oid_sha512_with_rsa_encryption ), + .oid = ASN1_CURSOR ( oid_sha512_with_rsa_encryption ), }; /** SHA-512 digestInfo prefix */ 
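/* Background note on the "digestInfo prefix" comments touched in the
 * rsa_*.c hunks above: in PKCS#1 v1.5 signatures the raw digest is
 * prepended with a fixed DER encoding of DigestInfo.  For SHA-256
 * that prefix is the well-known sequence listed in RFC 8017 section
 * 9.2; shown here purely as an illustration. */
static const uint8_t demo_sha256_digestinfo_prefix[] = {
        0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01,
        0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20
        /* ...followed by the 32-byte SHA-256 digest itself */
};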
diff --git a/src/crypto/ocsp.c b/src/crypto/ocsp.c index 2c747fb39..cc957b40c 100644 --- a/src/crypto/ocsp.c +++ b/src/crypto/ocsp.c @@ -116,7 +116,7 @@ static const uint8_t oid_basic_response_type[] = { ASN1_OID_OCSP_BASIC }; /** OCSP basic response type cursor */ static struct asn1_cursor oid_basic_response_type_cursor = - ASN1_OID_CURSOR ( oid_basic_response_type ); + ASN1_CURSOR ( oid_basic_response_type ); /** * Free OCSP check @@ -284,7 +284,7 @@ int ocsp_check ( struct x509_certificate *cert, /* Sanity checks */ assert ( cert != NULL ); assert ( issuer != NULL ); - assert ( x509_is_valid ( issuer ) ); + assert ( issuer->root != NULL ); /* Allocate and initialise check */ *ocsp = zalloc ( sizeof ( **ocsp ) ); @@ -476,33 +476,43 @@ static int ocsp_parse_responder_id ( struct ocsp_check *ocsp, */ static int ocsp_parse_cert_id ( struct ocsp_check *ocsp, const struct asn1_cursor *raw ) { + static struct asn1_cursor algorithm = { + .data = ocsp_algorithm_id, + .len = sizeof ( ocsp_algorithm_id ), + }; + struct asn1_cursor cert_id; struct asn1_cursor cursor; - struct asn1_algorithm *algorithm; int rc; - /* Check certID algorithm */ - memcpy ( &cursor, raw, sizeof ( cursor ) ); - asn1_enter ( &cursor, ASN1_SEQUENCE ); - if ( ( rc = asn1_digest_algorithm ( &cursor, &algorithm ) ) != 0 ) { - DBGC ( ocsp, "OCSP %p \"%s\" certID unknown algorithm: %s\n", - ocsp, x509_name ( ocsp->cert ), strerror ( rc ) ); - return rc; + /* Enter cert ID */ + memcpy ( &cert_id, raw, sizeof ( cert_id ) ); + asn1_enter ( &cert_id, ASN1_SEQUENCE ); + + /* Check certID algorithm (but not parameters) */ + memcpy ( &cursor, &cert_id, sizeof ( cursor ) ); + if ( ( rc = ( asn1_enter ( &cursor, ASN1_SEQUENCE ), + asn1_shrink ( &cursor, ASN1_OID ), + asn1_shrink ( &algorithm, ASN1_OID ) ) ) != 0 ) { + DBGC ( ocsp, "OCSP %p \"%s\" certID missing algorithm:\n", + ocsp, x509_name ( ocsp->cert ) ); + DBGC_HDA ( ocsp, 0, cursor.data, cursor.len ); + return -EACCES_CERT_MISMATCH; } - if ( algorithm->digest != &ocsp_digest_algorithm ) { - DBGC ( ocsp, "OCSP %p \"%s\" certID wrong algorithm %s\n", - ocsp, x509_name ( ocsp->cert ), - algorithm->digest->name ); + if ( asn1_compare ( &cursor, &algorithm ) != 0 ) { + DBGC ( ocsp, "OCSP %p \"%s\" certID wrong algorithm:\n", + ocsp, x509_name ( ocsp->cert ) ); + DBGC_HDA ( ocsp, 0, cursor.data, cursor.len ); return -EACCES_CERT_MISMATCH; } /* Check remaining certID fields */ - asn1_skip ( &cursor, ASN1_SEQUENCE ); - if ( asn1_compare ( &cursor, &ocsp->request.cert_id_tail ) != 0 ) { + asn1_skip ( &cert_id, ASN1_SEQUENCE ); + if ( asn1_compare ( &cert_id, &ocsp->request.cert_id_tail ) != 0 ) { DBGC ( ocsp, "OCSP %p \"%s\" certID mismatch:\n", ocsp, x509_name ( ocsp->cert ) ); DBGC_HDA ( ocsp, 0, ocsp->request.cert_id_tail.data, ocsp->request.cert_id_tail.len ); - DBGC_HDA ( ocsp, 0, cursor.data, cursor.len ); + DBGC_HDA ( ocsp, 0, cert_id.data, cert_id.len ); return -EACCES_CERT_MISMATCH; } @@ -822,18 +832,6 @@ int ocsp_response ( struct ocsp_check *ocsp, const void *data, size_t len ) { return 0; } -/** - * OCSP dummy root certificate store - * - * OCSP validation uses no root certificates, since it takes place - * only when there already exists a validated issuer certificate. 
- */ -static struct x509_root ocsp_root = { - .digest = &ocsp_digest_algorithm, - .count = 0, - .fingerprints = NULL, -}; - /** * Check OCSP response signature * @@ -917,7 +915,7 @@ int ocsp_validate ( struct ocsp_check *ocsp, time_t time ) { */ x509_invalidate ( signer ); if ( ( rc = x509_validate ( signer, ocsp->issuer, time, - &ocsp_root ) ) != 0 ) { + ocsp->issuer->root ) ) != 0 ) { DBGC ( ocsp, "OCSP %p \"%s\" could not validate ", ocsp, x509_name ( ocsp->cert ) ); DBGC ( ocsp, "signer \"%s\": %s\n", @@ -963,7 +961,7 @@ int ocsp_validate ( struct ocsp_check *ocsp, time_t time ) { /* Validate certificate against issuer */ if ( ( rc = x509_validate ( ocsp->cert, ocsp->issuer, time, - &ocsp_root ) ) != 0 ) { + ocsp->issuer->root ) ) != 0 ) { DBGC ( ocsp, "OCSP %p \"%s\" could not validate certificate: " "%s\n", ocsp, x509_name ( ocsp->cert ), strerror ( rc )); return rc; diff --git a/src/crypto/privkey.c b/src/crypto/privkey.c index 7ef04880f..c15edf130 100644 --- a/src/crypto/privkey.c +++ b/src/crypto/privkey.c @@ -64,9 +64,12 @@ __asm__ ( ".section \".rodata\", \"a\", " PROGBITS "\n\t" ".previous\n\t" ); /** Private key */ -struct asn1_cursor private_key = { - .data = private_key_data, - .len = ( ( size_t ) private_key_len ), +struct private_key private_key = { + .refcnt = REF_INIT ( ref_no_free ), + .builder = { + .data = private_key_data, + .len = ( ( size_t ) private_key_len ), + }, }; /** Default private key */ @@ -83,6 +86,19 @@ static struct setting privkey_setting __setting ( SETTING_CRYPTO, privkey ) = { .type = &setting_type_hex, }; +/** + * Free private key + * + * @v refcnt Reference counter + */ +void privkey_free ( struct refcnt *refcnt ) { + struct private_key *key = + container_of ( refcnt, struct private_key, refcnt ); + + free ( key->builder.data ); + free ( key ); +} + /** * Apply private key configuration settings * @@ -98,23 +114,24 @@ static int privkey_apply_settings ( void ) { if ( ALLOW_KEY_OVERRIDE ) { /* Restore default private key */ - memcpy ( &private_key, &default_private_key, - sizeof ( private_key ) ); + memcpy ( &private_key.builder, &default_private_key, + sizeof ( private_key.builder ) ); /* Fetch new private key, if any */ free ( key_data ); if ( ( len = fetch_raw_setting_copy ( NULL, &privkey_setting, &key_data ) ) >= 0 ) { - private_key.data = key_data; - private_key.len = len; + private_key.builder.data = key_data; + private_key.builder.len = len; } } /* Debug */ - if ( private_key.len ) { + if ( private_key.builder.len ) { DBGC ( &private_key, "PRIVKEY using %s private key:\n", ( key_data ? 
"external" : "built-in" ) ); - DBGC_HDA ( &private_key, 0, private_key.data, private_key.len ); + DBGC_HDA ( &private_key, 0, private_key.builder.data, + private_key.builder.len ); } else { DBGC ( &private_key, "PRIVKEY has no private key\n" ); } diff --git a/src/crypto/rootcert.c b/src/crypto/rootcert.c index 867ff50e8..0835ff071 100644 --- a/src/crypto/rootcert.c +++ b/src/crypto/rootcert.c @@ -71,6 +71,7 @@ static struct setting trust_setting __setting ( SETTING_CRYPTO, trust ) = { /** Root certificates */ struct x509_root root_certificates = { + .refcnt = REF_INIT ( ref_no_free ), .digest = &sha256_algorithm, .count = ( sizeof ( fingerprints ) / FINGERPRINT_LEN ), .fingerprints = fingerprints, diff --git a/src/crypto/rsa.c b/src/crypto/rsa.c index 2c5cf67dd..a38955744 100644 --- a/src/crypto/rsa.c +++ b/src/crypto/rsa.c @@ -47,17 +47,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define EINFO_EACCES_VERIFY \ __einfo_uniqify ( EINFO_EACCES, 0x01, "RSA signature incorrect" ) -/** "rsaEncryption" object identifier */ -static uint8_t oid_rsa_encryption[] = { ASN1_OID_RSAENCRYPTION }; - -/** "rsaEncryption" OID-identified algorithm */ -struct asn1_algorithm rsa_encryption_algorithm __asn1_algorithm = { - .name = "rsaEncryption", - .pubkey = &rsa_algorithm, - .digest = NULL, - .oid = ASN1_OID_CURSOR ( oid_rsa_encryption ), -}; - /** * Identify RSA prefix * diff --git a/src/crypto/sha1.c b/src/crypto/sha1.c index 51866f4b7..94fce0029 100644 --- a/src/crypto/sha1.c +++ b/src/crypto/sha1.c @@ -35,7 +35,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include #include /** SHA-1 variables */ @@ -264,13 +263,3 @@ struct digest_algorithm sha1_algorithm = { .update = sha1_update, .final = sha1_final, }; - -/** "sha1" object identifier */ -static uint8_t oid_sha1[] = { ASN1_OID_SHA1 }; - -/** "sha1" OID-identified algorithm */ -struct asn1_algorithm oid_sha1_algorithm __asn1_algorithm = { - .name = "sha1", - .digest = &sha1_algorithm, - .oid = ASN1_OID_CURSOR ( oid_sha1 ), -}; diff --git a/src/crypto/sha224.c b/src/crypto/sha224.c index be25f24e9..e54a0abb0 100644 --- a/src/crypto/sha224.c +++ b/src/crypto/sha224.c @@ -32,7 +32,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include #include /** SHA-224 initial digest values */ @@ -70,13 +69,3 @@ struct digest_algorithm sha224_algorithm = { .update = sha256_update, .final = sha256_final, }; - -/** "sha224" object identifier */ -static uint8_t oid_sha224[] = { ASN1_OID_SHA224 }; - -/** "sha224" OID-identified algorithm */ -struct asn1_algorithm oid_sha224_algorithm __asn1_algorithm = { - .name = "sha224", - .digest = &sha224_algorithm, - .oid = ASN1_OID_CURSOR ( oid_sha224 ), -}; diff --git a/src/crypto/sha256.c b/src/crypto/sha256.c index 0360d8d16..6bd727719 100644 --- a/src/crypto/sha256.c +++ b/src/crypto/sha256.c @@ -35,7 +35,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include #include /** SHA-256 variables */ @@ -271,13 +270,3 @@ struct digest_algorithm sha256_algorithm = { .update = sha256_update, .final = sha256_final, }; - -/** "sha256" object identifier */ -static uint8_t oid_sha256[] = { ASN1_OID_SHA256 }; - -/** "sha256" OID-identified algorithm */ -struct asn1_algorithm oid_sha256_algorithm __asn1_algorithm = { - .name = "sha256", - .digest = &sha256_algorithm, - .oid = ASN1_OID_CURSOR ( oid_sha256 ), -}; diff --git a/src/crypto/sha384.c b/src/crypto/sha384.c index 017751826..f1af6fc6f 100644 --- a/src/crypto/sha384.c +++ b/src/crypto/sha384.c @@ -32,7 +32,6 
@@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include #include /** SHA-384 initial digest values */ @@ -70,13 +69,3 @@ struct digest_algorithm sha384_algorithm = { .update = sha512_update, .final = sha512_final, }; - -/** "sha384" object identifier */ -static uint8_t oid_sha384[] = { ASN1_OID_SHA384 }; - -/** "sha384" OID-identified algorithm */ -struct asn1_algorithm oid_sha384_algorithm __asn1_algorithm = { - .name = "sha384", - .digest = &sha384_algorithm, - .oid = ASN1_OID_CURSOR ( oid_sha384 ), -}; diff --git a/src/crypto/sha512.c b/src/crypto/sha512.c index 814f44563..e84895010 100644 --- a/src/crypto/sha512.c +++ b/src/crypto/sha512.c @@ -35,7 +35,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include #include /** SHA-512 variables */ @@ -291,13 +290,3 @@ struct digest_algorithm sha512_algorithm = { .update = sha512_update, .final = sha512_final, }; - -/** "sha512" object identifier */ -static uint8_t oid_sha512[] = { ASN1_OID_SHA512 }; - -/** "sha512" OID-identified algorithm */ -struct asn1_algorithm oid_sha512_algorithm __asn1_algorithm = { - .name = "sha512", - .digest = &sha512_algorithm, - .oid = ASN1_OID_CURSOR ( oid_sha512 ), -}; diff --git a/src/crypto/sha512_224.c b/src/crypto/sha512_224.c index 8c37b566b..b6728726c 100644 --- a/src/crypto/sha512_224.c +++ b/src/crypto/sha512_224.c @@ -32,7 +32,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include #include /** SHA-512/224 initial digest values */ @@ -71,13 +70,3 @@ struct digest_algorithm sha512_224_algorithm = { .update = sha512_update, .final = sha512_final, }; - -/** "sha512_224" object identifier */ -static uint8_t oid_sha512_224[] = { ASN1_OID_SHA512_224 }; - -/** "sha512_224" OID-identified algorithm */ -struct asn1_algorithm oid_sha512_224_algorithm __asn1_algorithm = { - .name = "sha512/224", - .digest = &sha512_224_algorithm, - .oid = ASN1_OID_CURSOR ( oid_sha512_224 ), -}; diff --git a/src/crypto/sha512_256.c b/src/crypto/sha512_256.c index f8afaf3e3..8163631e0 100644 --- a/src/crypto/sha512_256.c +++ b/src/crypto/sha512_256.c @@ -32,7 +32,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include #include /** SHA-512/256 initial digest values */ @@ -71,13 +70,3 @@ struct digest_algorithm sha512_256_algorithm = { .update = sha512_update, .final = sha512_final, }; - -/** "sha512_256" object identifier */ -static uint8_t oid_sha512_256[] = { ASN1_OID_SHA512_256 }; - -/** "sha512_256" OID-identified algorithm */ -struct asn1_algorithm oid_sha512_256_algorithm __asn1_algorithm = { - .name = "sha512/256", - .digest = &sha512_256_algorithm, - .oid = ASN1_OID_CURSOR ( oid_sha512_256 ), -}; diff --git a/src/crypto/x509.c b/src/crypto/x509.c index feb7e4a0a..1f017eb03 100644 --- a/src/crypto/x509.c +++ b/src/crypto/x509.c @@ -25,6 +25,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include +#include #include #include #include @@ -122,6 +123,19 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define EINFO_EACCES_USELESS \ __einfo_uniqify ( EINFO_EACCES, 0x0b, "No usable certificates" ) +/** + * Free X.509 certificate + * + * @v refcnt Reference count + */ +static void x509_free ( struct refcnt *refcnt ) { + struct x509_certificate *cert = + container_of ( refcnt, struct x509_certificate, refcnt ); + + x509_root_put ( cert->root ); + free ( cert ); +} + /** * Get X.509 certificate display name * @@ -156,7 +170,7 @@ static uint8_t oid_common_name[] = { ASN1_OID_COMMON_NAME }; /** "commonName" object identifier cursor */ 
static struct asn1_cursor oid_common_name_cursor = - ASN1_OID_CURSOR ( oid_common_name ); + ASN1_CURSOR ( oid_common_name ); /** * Parse X.509 certificate version @@ -523,12 +537,12 @@ static struct x509_key_purpose x509_key_purposes[] = { { .name = "codeSigning", .bits = X509_CODE_SIGNING, - .oid = ASN1_OID_CURSOR ( oid_code_signing ), + .oid = ASN1_CURSOR ( oid_code_signing ), }, { .name = "ocspSigning", .bits = X509_OCSP_SIGNING, - .oid = ASN1_OID_CURSOR ( oid_ocsp_signing ), + .oid = ASN1_CURSOR ( oid_ocsp_signing ), }, }; @@ -631,7 +645,7 @@ static uint8_t oid_ad_ocsp[] = { ASN1_OID_OCSP }; static struct x509_access_method x509_access_methods[] = { { .name = "OCSP", - .oid = ASN1_OID_CURSOR ( oid_ad_ocsp ), + .oid = ASN1_CURSOR ( oid_ad_ocsp ), .parse = x509_parse_ocsp, }, }; @@ -768,27 +782,27 @@ static uint8_t oid_ce_subject_alt_name[] = static struct x509_extension x509_extensions[] = { { .name = "basicConstraints", - .oid = ASN1_OID_CURSOR ( oid_ce_basic_constraints ), + .oid = ASN1_CURSOR ( oid_ce_basic_constraints ), .parse = x509_parse_basic_constraints, }, { .name = "keyUsage", - .oid = ASN1_OID_CURSOR ( oid_ce_key_usage ), + .oid = ASN1_CURSOR ( oid_ce_key_usage ), .parse = x509_parse_key_usage, }, { .name = "extKeyUsage", - .oid = ASN1_OID_CURSOR ( oid_ce_ext_key_usage ), + .oid = ASN1_CURSOR ( oid_ce_ext_key_usage ), .parse = x509_parse_extended_key_usage, }, { .name = "authorityInfoAccess", - .oid = ASN1_OID_CURSOR ( oid_pe_authority_info_access ), + .oid = ASN1_CURSOR ( oid_pe_authority_info_access ), .parse = x509_parse_authority_info_access, }, { .name = "subjectAltName", - .oid = ASN1_OID_CURSOR ( oid_ce_subject_alt_name ), + .oid = ASN1_CURSOR ( oid_ce_subject_alt_name ), .parse = x509_parse_subject_alt_name, }, }; @@ -1075,7 +1089,7 @@ int x509_certificate ( const void *data, size_t len, *cert = zalloc ( sizeof ( **cert ) + cursor.len ); if ( ! *cert ) return -ENOMEM; - ref_init ( &(*cert)->refcnt, NULL ); + ref_init ( &(*cert)->refcnt, x509_free ); raw = ( *cert + 1 ); /* Copy raw data */ @@ -1295,6 +1309,50 @@ int x509_check_time ( struct x509_certificate *cert, time_t time ) { return 0; } +/** + * Check if X.509 certificate is valid + * + * @v cert X.509 certificate + * @v root Root certificate list, or NULL to use default + */ +int x509_is_valid ( struct x509_certificate *cert, struct x509_root *root ) { + + /* Use default root certificate store if none specified */ + if ( ! 
root ) + root = &root_certificates; + + return ( cert->root == root ); +} + +/** + * Set X.509 certificate as validated + * + * @v cert X.509 certificate + * @v issuer Issuing X.509 certificate (or NULL) + * @v root Root certificate list + */ +static void x509_set_valid ( struct x509_certificate *cert, + struct x509_certificate *issuer, + struct x509_root *root ) { + unsigned int max_path_remaining; + + /* Sanity checks */ + assert ( root != NULL ); + assert ( ( issuer == NULL ) || ( issuer->path_remaining >= 1 ) ); + + /* Record validation root */ + x509_root_put ( cert->root ); + cert->root = x509_root_get ( root ); + + /* Calculate effective path length */ + cert->path_remaining = ( cert->extensions.basic.path_len + 1 ); + if ( issuer ) { + max_path_remaining = ( issuer->path_remaining - 1 ); + if ( cert->path_remaining > max_path_remaining ) + cert->path_remaining = max_path_remaining; + } +} + /** * Validate X.509 certificate * @@ -1313,7 +1371,6 @@ int x509_check_time ( struct x509_certificate *cert, time_t time ) { int x509_validate ( struct x509_certificate *cert, struct x509_certificate *issuer, time_t time, struct x509_root *root ) { - unsigned int max_path_remaining; int rc; /* Use default root certificate store if none specified */ @@ -1321,7 +1378,7 @@ int x509_validate ( struct x509_certificate *cert, root = &root_certificates; /* Return success if certificate has already been validated */ - if ( x509_is_valid ( cert ) ) + if ( x509_is_valid ( cert, root ) ) return 0; /* Fail if certificate is invalid at specified time */ @@ -1330,20 +1387,19 @@ int x509_validate ( struct x509_certificate *cert, /* Succeed if certificate is a trusted root certificate */ if ( x509_check_root ( cert, root ) == 0 ) { - cert->flags |= X509_FL_VALIDATED; - cert->path_remaining = ( cert->extensions.basic.path_len + 1 ); + x509_set_valid ( cert, NULL, root ); return 0; } /* Fail unless we have an issuer */ if ( ! issuer ) { - DBGC2 ( cert, "X509 %p \"%s\" has no issuer\n", + DBGC2 ( cert, "X509 %p \"%s\" has no trusted issuer\n", cert, x509_name ( cert ) ); return -EACCES_UNTRUSTED; } /* Fail unless issuer has already been validated */ - if ( ! x509_is_valid ( issuer ) ) { + if ( ! x509_is_valid ( issuer, root ) ) { DBGC ( cert, "X509 %p \"%s\" ", cert, x509_name ( cert ) ); DBGC ( cert, "issuer %p \"%s\" has not yet been validated\n", issuer, x509_name ( issuer ) ); @@ -1369,14 +1425,8 @@ int x509_validate ( struct x509_certificate *cert, return -EACCES_OCSP_REQUIRED; } - /* Calculate effective path length */ - cert->path_remaining = ( issuer->path_remaining - 1 ); - max_path_remaining = ( cert->extensions.basic.path_len + 1 ); - if ( cert->path_remaining > max_path_remaining ) - cert->path_remaining = max_path_remaining; - /* Mark certificate as valid */ - cert->flags |= X509_FL_VALIDATED; + x509_set_valid ( cert, issuer, root ); DBGC ( cert, "X509 %p \"%s\" successfully validated using ", cert, x509_name ( cert ) ); @@ -1415,7 +1465,7 @@ static int x509_check_dnsname ( struct x509_certificate *cert, /* Compare names */ if ( ! 
( ( strlen ( name ) == len ) && - ( memcmp ( name, dnsname, len ) == 0 ) ) ) + ( strncasecmp ( name, dnsname, len ) == 0 ) ) ) return -ENOENT; if ( name != fullname ) { diff --git a/src/drivers/block/ibft.c b/src/drivers/block/ibft.c index f9918363a..ca5fad9ff 100644 --- a/src/drivers/block/ibft.c +++ b/src/drivers/block/ibft.c @@ -48,11 +48,15 @@ FILE_LICENCE ( BSD2 ); * * iSCSI boot firmware table * - * The information in this file is derived from the document "iSCSI - * Boot Firmware Table (iBFT)" as published by IBM at + * The information in this file is originally derived from the document "iSCSI + * Boot Firmware Table (iBFT)" as published by IBM at: * * ftp://ftp.software.ibm.com/systems/support/system_x_pdf/ibm_iscsi_boot_firmware_table_v1.02.pdf * + * That file is no longer available, but a more recent version is available: + * + * ftp://ftp.software.ibm.com/systems/support/bladecenter/iscsi_boot_firmware_table_v1.03.pdf + * */ /** diff --git a/src/drivers/bus/isa.c b/src/drivers/bus/isa.c index da0c43c60..94d4ce99b 100644 --- a/src/drivers/bus/isa.c +++ b/src/drivers/bus/isa.c @@ -5,6 +5,7 @@ #include #include #include +#include FILE_LICENCE ( GPL2_OR_LATER ); @@ -94,7 +95,7 @@ static void isa_remove ( struct isa_device *isa ) { static int isabus_probe ( struct root_device *rootdev ) { struct isa_device *isa = NULL; struct isa_driver *driver; - int ioidx; + long ioidx; int rc; for_each_table_entry ( driver, ISA_DRIVERS ) { diff --git a/src/drivers/bus/pci.c b/src/drivers/bus/pci.c index 06b36a770..1b7350c8b 100644 --- a/src/drivers/bus/pci.c +++ b/src/drivers/bus/pci.c @@ -228,6 +228,9 @@ int pci_read_config ( struct pci_device *pci ) { */ int pci_find_next ( struct pci_device *pci, unsigned int busdevfn ) { static unsigned int end; + unsigned int sub_end; + uint8_t hdrtype; + uint8_t sub; int rc; /* Determine number of PCI buses */ @@ -236,10 +239,30 @@ int pci_find_next ( struct pci_device *pci, unsigned int busdevfn ) { /* Find next PCI device, if any */ for ( ; busdevfn < end ; busdevfn++ ) { + + /* Check for PCI device existence */ memset ( pci, 0, sizeof ( *pci ) ); pci_init ( pci, busdevfn ); - if ( ( rc = pci_read_config ( pci ) ) == 0 ) - return busdevfn; + if ( ( rc = pci_read_config ( pci ) ) != 0 ) + continue; + + /* If device is a bridge, expand the number of PCI + * buses as needed. + */ + pci_read_config_byte ( pci, PCI_HEADER_TYPE, &hdrtype ); + hdrtype &= PCI_HEADER_TYPE_MASK; + if ( hdrtype == PCI_HEADER_TYPE_BRIDGE ) { + pci_read_config_byte ( pci, PCI_SUBORDINATE, &sub ); + sub_end = PCI_BUSDEVFN ( 0, ( sub + 1 ), 0, 0 ); + if ( end < sub_end ) { + DBGC ( pci, PCI_FMT " found subordinate bus " + "%#02x\n", PCI_ARGS ( pci ), sub ); + end = sub_end; + } + } + + /* Return this device */ + return busdevfn; } return -ENODEV; diff --git a/src/drivers/bus/pcimsix.c b/src/drivers/bus/pcimsix.c index 80893c418..eb0450d91 100644 --- a/src/drivers/bus/pcimsix.c +++ b/src/drivers/bus/pcimsix.c @@ -84,7 +84,7 @@ static void * pci_msix_ioremap ( struct pci_device *pci, struct pci_msix *msix, msix, pci_msix_name ( cfg ), base, bar, offset ); /* Map BAR portion */ - io = ioremap ( ( start + offset ), PCI_MSIX_LEN ); + io = pci_ioremap ( pci, ( start + offset ), PCI_MSIX_LEN ); if ( ! 
io ) { DBGC ( msix, "MSI-X %p %s could not map %#08lx\n", msix, pci_msix_name ( cfg ), base ); diff --git a/src/drivers/bus/usb.c b/src/drivers/bus/usb.c index d8db3849a..428ae26c1 100644 --- a/src/drivers/bus/usb.c +++ b/src/drivers/bus/usb.c @@ -362,6 +362,35 @@ static int usb_endpoint_clear_tt ( struct usb_endpoint *ep ) { return 0; } +/** + * Clear endpoint halt (if applicable) + * + * @v ep USB endpoint + * @ret rc Return status code + */ +int usb_endpoint_clear_halt ( struct usb_endpoint *ep ) { + struct usb_device *usb = ep->usb; + unsigned int type; + int rc; + + /* Clear transaction translator, if applicable */ + if ( ( rc = usb_endpoint_clear_tt ( ep ) ) != 0 ) + return rc; + + /* Clear endpoint halt (if applicable) */ + type = ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK ); + if ( ( type != USB_ENDPOINT_ATTR_CONTROL ) && + ( ( rc = usb_clear_feature ( usb, USB_RECIP_ENDPOINT, + USB_ENDPOINT_HALT, + ep->address ) ) != 0 ) ) { + DBGC ( usb, "USB %s %s could not clear endpoint halt: %s\n", + usb->name, usb_endpoint_name ( ep ), strerror ( rc ) ); + return rc; + } + + return 0; +} + /** * Close USB endpoint * @@ -399,12 +428,15 @@ void usb_endpoint_close ( struct usb_endpoint *ep ) { */ static int usb_endpoint_reset ( struct usb_endpoint *ep ) { struct usb_device *usb = ep->usb; - unsigned int type; int rc; /* Sanity check */ assert ( ! list_empty ( &ep->halted ) ); + /* Clear device halt, if applicable */ + if ( ( rc = usb_endpoint_clear_halt ( ep ) ) != 0 ) + return rc; + /* Reset endpoint */ if ( ( rc = ep->host->reset ( ep ) ) != 0 ) { DBGC ( usb, "USB %s %s could not reset: %s\n", @@ -412,21 +444,6 @@ static int usb_endpoint_reset ( struct usb_endpoint *ep ) { return rc; } - /* Clear transaction translator, if applicable */ - if ( ( rc = usb_endpoint_clear_tt ( ep ) ) != 0 ) - return rc; - - /* Clear endpoint halt, if applicable */ - type = ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK ); - if ( ( type != USB_ENDPOINT_ATTR_CONTROL ) && - ( ( rc = usb_clear_feature ( usb, USB_RECIP_ENDPOINT, - USB_ENDPOINT_HALT, - ep->address ) ) != 0 ) ) { - DBGC ( usb, "USB %s %s could not clear endpoint halt: %s\n", - usb->name, usb_endpoint_name ( ep ), strerror ( rc ) ); - return rc; - } - /* Remove from list of halted endpoints */ list_del ( &ep->halted ); INIT_LIST_HEAD ( &ep->halted ); @@ -634,12 +651,13 @@ int usb_prefill ( struct usb_endpoint *ep ) { } /** - * Refill endpoint + * Refill endpoint up to specified limit * * @v ep USB endpoint + * @v max Fill limit * @ret rc Return status code */ -int usb_refill ( struct usb_endpoint *ep ) { +int usb_refill_limit ( struct usb_endpoint *ep, unsigned int max ) { struct io_buffer *iobuf; size_t reserve = ep->reserve; size_t len = ( ep->len ? 
ep->len : ep->mtu ); @@ -650,7 +668,9 @@ int usb_refill ( struct usb_endpoint *ep ) { assert ( ep->max > 0 ); /* Refill endpoint */ - while ( ep->fill < ep->max ) { + if ( max > ep->max ) + max = ep->max; + while ( ep->fill < max ) { /* Get or allocate buffer */ if ( list_empty ( &ep->recycled ) ) { @@ -681,6 +701,16 @@ int usb_refill ( struct usb_endpoint *ep ) { return 0; } +/** + * Refill endpoint + * + * @v ep USB endpoint + * @ret rc Return status code + */ +int usb_refill ( struct usb_endpoint *ep ) { + return usb_refill_limit ( ep, ep->max ); +} + /** * Discard endpoint recycled buffer list * @@ -818,6 +848,7 @@ int usb_control ( struct usb_device *usb, unsigned int request, "failed: %s\n", usb->name, request, value, index, strerror ( rc ) ); free_iob ( cmplt ); + usb_endpoint_reset ( ep ); return rc; } @@ -912,9 +943,15 @@ int usb_get_string_descriptor ( struct usb_device *usb, unsigned int index, sizeof ( *desc ) ) ) != 0 ) goto err_get_descriptor; - /* Copy to buffer */ + /* Calculate string length */ + if ( desc->header.len < sizeof ( desc->header ) ) { + rc = -EINVAL; + goto err_len; + } actual = ( ( desc->header.len - sizeof ( desc->header ) ) / sizeof ( desc->character[0] ) ); + + /* Copy to buffer */ for ( i = 0 ; ( ( i < actual ) && ( i < max ) ) ; i++ ) buf[i] = le16_to_cpu ( desc->character[i] ); if ( len ) @@ -925,6 +962,7 @@ int usb_get_string_descriptor ( struct usb_device *usb, unsigned int index, return actual; + err_len: err_get_descriptor: free ( desc ); err_alloc: @@ -1615,7 +1653,9 @@ static int register_usb ( struct usb_device *usb ) { usb->host->close ( usb ); err_open: err_speed: - hub->driver->disable ( hub, port ); + /* Leave port enabled on failure, to avoid an endless loop of + * failed device registrations. + */ err_enable: list_del ( &usb->list ); port->usb = NULL; @@ -1634,6 +1674,11 @@ static void unregister_usb ( struct usb_device *usb ) { struct io_buffer *iobuf; struct io_buffer *tmp; + DBGC ( usb, "USB %s addr %d %04x:%04x class %d:%d:%d removed\n", + usb->name, usb->address, le16_to_cpu ( usb->device.vendor ), + le16_to_cpu ( usb->device.product ), usb->device.class.class, + usb->device.class.subclass, usb->device.class.protocol ); + /* Sanity checks */ assert ( port->usb == usb ); @@ -2232,23 +2277,6 @@ unsigned int usb_route_string ( struct usb_device *usb ) { return route; } -/** - * Get USB depth - * - * @v usb USB device - * @ret depth Hub depth - */ -unsigned int usb_depth ( struct usb_device *usb ) { - struct usb_device *parent; - unsigned int depth; - - /* Navigate up to root hub, constructing depth as we go */ - for ( depth = 0 ; ( parent = usb->port->hub->usb ) ; usb = parent ) - depth++; - - return depth; -} - /** * Get USB root hub port * diff --git a/src/drivers/bus/virtio-pci.c b/src/drivers/bus/virtio-pci.c index 402bf4f12..5d2d62750 100644 --- a/src/drivers/bus/virtio-pci.c +++ b/src/drivers/bus/virtio-pci.c @@ -321,7 +321,7 @@ int virtio_pci_map_capability(struct pci_device *pci, int cap, size_t minlen, region->flags = VIRTIO_PCI_REGION_PORT; } else { /* Region mapped into memory space */ - region->base = ioremap(base + offset, length); + region->base = pci_ioremap(pci, base + offset, length); region->flags = VIRTIO_PCI_REGION_MEMORY; } } diff --git a/src/drivers/infiniband/MT25218_PRM.h b/src/drivers/infiniband/MT25218_PRM.h index 4011bd0ba..c4dc3316b 100644 --- a/src/drivers/infiniband/MT25218_PRM.h +++ b/src/drivers/infiniband/MT25218_PRM.h @@ -73,7 +73,7 @@ struct arbelprm_ud_address_vector_st { /* Little Endian */ /* 
-------------- */ pseudo_bit_t rgid_31_0[0x00020]; /* Remote GID[31:0] if G bit is set. Must be set to 0x2 if G bit is cleared. */ /* -------------- */ -}; +}; /* Send doorbell */ @@ -88,7 +88,7 @@ struct arbelprm_send_doorbell_st { /* Little Endian */ pseudo_bit_t reserved1[0x00002]; pseudo_bit_t qpn[0x00018]; /* QP number this doorbell is rung on */ /* -------------- */ -}; +}; /* ACCESS_LAM_inject_errors_input_modifier */ @@ -102,7 +102,7 @@ struct arbelprm_access_lam_inject_errors_input_modifier_st { /* Little Endian */ pseudo_bit_t index0[0x00007]; pseudo_bit_t q0[0x00001]; /* -------------- */ -}; +}; /* ACCESS_LAM_inject_errors_input_parameter */ @@ -114,7 +114,7 @@ struct arbelprm_access_lam_inject_errors_input_parameter_st { /* Little Endian * pseudo_bit_t ra[0x00010]; /* Row Address */ pseudo_bit_t ca[0x00010]; /* Column Address */ /* -------------- */ -}; +}; /* */ @@ -127,7 +127,7 @@ struct arbelprm_recv_wqe_segment_next_st { /* Little Endian */ */ pseudo_bit_t reserved1[0x0001a]; /* -------------- */ -}; +}; /* Send wqe segment data inline */ @@ -141,7 +141,7 @@ struct arbelprm_wqe_segment_data_inline_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved2[0x00040]; /* -------------- */ -}; +}; /* Send wqe segment data ptr */ @@ -155,7 +155,7 @@ struct arbelprm_wqe_segment_data_ptr_st { /* Little Endian */ /* -------------- */ pseudo_bit_t local_address_l[0x00020]; /* -------------- */ -}; +}; /* Send wqe segment rd */ @@ -167,7 +167,7 @@ struct arbelprm_local_invalidate_segment_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved2[0x000a0]; /* -------------- */ -}; +}; /* Fast_Registration_Segment */ @@ -197,7 +197,7 @@ struct arbelprm_fast_registration_segment_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reg_len_l[0x00020]; /* Region Length[31:0] */ /* -------------- */ -}; +}; /* Send wqe segment atomic */ @@ -210,7 +210,7 @@ struct arbelprm_wqe_segment_atomic_st { /* Little Endian */ /* -------------- */ pseudo_bit_t compare_l[0x00020]; /* -------------- */ -}; +}; /* Send wqe segment remote address */ @@ -223,7 +223,7 @@ struct arbelprm_wqe_segment_remote_address_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved0[0x00020]; /* -------------- */ -}; +}; /* end wqe segment bind */ @@ -253,7 +253,7 @@ struct arbelprm_wqe_segment_bind_st { /* Little Endian */ /* -------------- */ pseudo_bit_t length_l[0x00020]; /* -------------- */ -}; +}; /* Send wqe segment ud */ @@ -267,7 +267,7 @@ struct arbelprm_wqe_segment_ud_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved1[0x00040]; /* -------------- */ -}; +}; /* Send wqe segment rd */ @@ -279,7 +279,7 @@ struct arbelprm_wqe_segment_rd_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved1[0x00040]; /* -------------- */ -}; +}; /* Send wqe segment ctrl */ @@ -296,7 +296,7 @@ struct arbelprm_wqe_segment_ctrl_send_st { /* Little Endian */ /* -------------- */ pseudo_bit_t immediate[0x00020]; /* If the OpCode encodes an operation with Immediate (RDMA-write/SEND), This field will hold the Immediate data to be sent. If the OpCode encodes send and invalidate operations, this field holds the Invalidation key to be inserted into the packet; otherwise, this field is reserved. 
*/ /* -------------- */ -}; +}; /* Send wqe segment next */ @@ -338,7 +338,7 @@ struct arbelprm_wqe_segment_next_st { /* Little Endian */ pseudo_bit_t always1[0x00001]; pseudo_bit_t reserved1[0x00018]; /* -------------- */ -}; +}; /* Address Path */ @@ -384,7 +384,7 @@ struct arbelprm_address_path_st { /* Little Endian */ /* -------------- */ pseudo_bit_t rgid_31_0[0x00020]; /* Remote GID[31:0] */ /* -------------- */ -}; +}; /* HCA Command Register (HCR) */ @@ -414,7 +414,7 @@ struct arbelprm_hca_command_register_st { /* Little Endian */ pseudo_bit_t status[0x00008]; /* Command execution status report. Valid only if command interface in under SW ownership (Go bit is cleared) 0 - command completed without error. If different than zero, command execution completed with error. Syndrom encoding is depended on command executed and is defined for each command */ /* -------------- */ -}; +}; /* CQ Doorbell */ @@ -435,7 +435,7 @@ struct arbelprm_cq_cmd_doorbell_st { /* Little Endian */ /* -------------- */ pseudo_bit_t cq_param[0x00020]; /* parameter to be used by CQ command */ /* -------------- */ -}; +}; /* RD-send doorbell */ @@ -449,7 +449,7 @@ struct arbelprm_rd_send_doorbell_st { /* Little Endian */ /* -------------- */ struct arbelprm_send_doorbell_st send_doorbell;/* Send Parameters */ /* -------------- */ -}; +}; /* Multicast Group Member QP */ @@ -458,7 +458,7 @@ struct arbelprm_mgmqp_st { /* Little Endian */ pseudo_bit_t reserved0[0x00007]; pseudo_bit_t qi[0x00001]; /* Qi: QPN_i is valid */ /* -------------- */ -}; +}; /* vsd */ @@ -575,7 +575,7 @@ struct arbelprm_vsd_st { /* Little Endian */ /* -------------- */ pseudo_bit_t vsd_dw55[0x00020]; /* -------------- */ -}; +}; /* ACCESS_LAM_inject_errors */ @@ -586,7 +586,7 @@ struct arbelprm_access_lam_inject_errors_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved0[0x00020]; /* -------------- */ -}; +}; /* Logical DIMM Information */ @@ -626,7 +626,7 @@ struct arbelprm_dimminfo_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved4[0x00040]; /* -------------- */ -}; +}; /* UAR Parameters */ @@ -661,7 +661,7 @@ struct arbelprm_uar_params_st { /* Little Endian */ Number of entries in table is 2^log_max_uars. Table must be aligned to its size. */ /* -------------- */ -}; +}; /* Translation and Protection Tables Parameters */ @@ -695,7 +695,7 @@ struct arbelprm_tptparams_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved3[0x00040]; /* -------------- */ -}; +}; /* Multicast Support Parameters */ @@ -729,7 +729,7 @@ struct arbelprm_multicastparam_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved5[0x00020]; /* -------------- */ -}; +}; /* QPC/EEC/CQC/EQC/RDB Parameters */ @@ -821,7 +821,7 @@ struct arbelprm_qpcbaseaddr_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved10[0x00040]; /* -------------- */ -}; +}; /* Header_Log_Register */ @@ -830,7 +830,7 @@ struct arbelprm_header_log_register_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved0[0x00060]; /* -------------- */ -}; +}; /* Performance Monitors */ @@ -862,7 +862,7 @@ struct arbelprm_performance_monitors_st { /* Little Endian */ /* -------------- */ pseudo_bit_t event_counter2[0x00020];/* Read/write event counter, counting events specified by EvCntl and EvCnt2 fields repsectively. When the event counter reaches is maximum value of 0xFFFFFF, the next event will cause it to roll over to zero, set F1 or F2 bit respectively and generate interrupt by I1 I2 bit respectively. 
*/ /* -------------- */ -}; +}; /* Receive segment format */ @@ -876,7 +876,7 @@ struct arbelprm_wqe_segment_ctrl_recv_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved4[0x00020]; /* -------------- */ -}; +}; /* MLX WQE segment format */ @@ -895,7 +895,7 @@ struct arbelprm_wqe_segment_ctrl_mlx_st { /* Little Endian */ pseudo_bit_t vcrc[0x00010]; /* Packet's VCRC (if not 0 - otherwise computed by HW) */ pseudo_bit_t rlid[0x00010]; /* Destination LID (must match given headers) */ /* -------------- */ -}; +}; /* Send WQE segment format */ @@ -926,7 +926,7 @@ struct arbelprm_send_wqe_segment_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved1[0x00200]; /* -------------- */ -}; +}; /* QP and EE Context Entry */ @@ -1118,7 +1118,7 @@ struct arbelprm_queue_pair_ee_context_entry_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved33[0x00040]; /* -------------- */ -}; +}; /* Clear Interrupt [63:0] */ @@ -1132,7 +1132,7 @@ struct arbelprm_clr_int_st { /* Little Endian */ Write transactions to this register will clear (de-assert) the virtual interrupt output pins of InfiniHost-III-EX. The value to be written in this register is obtained by executing QUERY_ADAPTER command on command interface after system boot. This register is write-only. Reading from this register will cause undefined result */ /* -------------- */ -}; +}; /* EQ_Arm_DB_Region */ @@ -1143,7 +1143,7 @@ struct arbelprm_eq_arm_db_region_st { /* Little Endian */ pseudo_bit_t eq_x_arm_l[0x00020]; /* EQ[31:0] X state. This register is used to Arm EQs when setting the appropriate bits. */ /* -------------- */ -}; +}; /* EQ Set CI DBs Table */ @@ -1404,7 +1404,7 @@ struct arbelprm_eq_set_ci_table_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved63[0x00020]; /* -------------- */ -}; +}; /* InfiniHost-III-EX Configuration Registers */ @@ -1415,7 +1415,7 @@ struct arbelprm_configuration_registers_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved1[0x3fcb20]; /* -------------- */ -}; +}; /* QP_DB_Record */ @@ -1429,7 +1429,7 @@ struct arbelprm_qp_db_record_st { /* Little Endian */ 0x5 for SRQ */ pseudo_bit_t qp_number[0x00018]; /* QP number */ /* -------------- */ -}; +}; /* CQ_ARM_DB_Record */ @@ -1445,7 +1445,7 @@ struct arbelprm_cq_arm_db_record_st { /* Little Endian */ pseudo_bit_t res[0x00003]; /* Must be 0x2 */ pseudo_bit_t cq_number[0x00018]; /* CQ number */ /* -------------- */ -}; +}; /* CQ_CI_DB_Record */ @@ -1456,7 +1456,7 @@ struct arbelprm_cq_ci_db_record_st { /* Little Endian */ pseudo_bit_t res[0x00003]; /* Must be 0x1 */ pseudo_bit_t cq_number[0x00018]; /* CQ number */ /* -------------- */ -}; +}; /* Virtual_Physical_Mapping */ @@ -1472,7 +1472,7 @@ struct arbelprm_virtual_physical_mapping_st { /* Little Endian */ pseudo_bit_t reserved1[0x00006]; pseudo_bit_t pa_l[0x00014]; /* Physical Address[31:12] */ /* -------------- */ -}; +}; /* MOD_STAT_CFG */ @@ -1485,7 +1485,7 @@ struct arbelprm_mod_stat_cfg_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved2[0x007e0]; /* -------------- */ -}; +}; /* SRQ Context */ @@ -1528,7 +1528,7 @@ struct arbelprm_srq_context_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved4[0x00060]; /* -------------- */ -}; +}; /* PBL */ @@ -1549,7 +1549,7 @@ struct arbelprm_pbl_st { /* Little Endian */ /* -------------- */ pseudo_bit_t mtt_3_l[0x00020]; /* Fourth MTT[31:0] */ /* -------------- */ -}; +}; /* Performance Counters */ @@ -1580,7 +1580,7 @@ struct arbelprm_performance_counters_st { 
/* Little Endian */ /* -------------- */ pseudo_bit_t reserved3[0x00620]; /* -------------- */ -}; +}; /* Transport and CI Error Counters */ @@ -1724,7 +1724,7 @@ struct arbelprm_transport_and_ci_error_counters_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved12[0x002a0]; /* -------------- */ -}; +}; /* Event_data Field - HCR Completion Event */ @@ -1743,7 +1743,7 @@ struct arbelprm_hcr_completion_event_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved3[0x00020]; /* -------------- */ -}; +}; /* Completion with Error CQE */ @@ -1791,7 +1791,7 @@ struct arbelprm_completion_with_error_st { /* Little Endian */ 0xFE - For completion with error on Receive Queues 0xFF - For completion with error on Send Queues */ /* -------------- */ -}; +}; /* Resize CQ Input Mailbox */ @@ -1814,7 +1814,7 @@ struct arbelprm_resize_cq_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved4[0x00100]; /* -------------- */ -}; +}; /* MAD_IFC Input Modifier */ @@ -1826,7 +1826,7 @@ struct arbelprm_mad_ifc_input_modifier_st { /* Little Endian */ pseudo_bit_t rlid[0x00010]; /* Remote (source) LID from the received MAD. This field is required for trap generation upon MKey/BKey validation. */ /* -------------- */ -}; +}; /* MAD_IFC Input Mailbox */ @@ -1863,7 +1863,7 @@ struct arbelprm_mad_ifc_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved5[0x004c0]; /* -------------- */ -}; +}; /* Query Debug Message */ @@ -1924,7 +1924,7 @@ struct arbelprm_query_debug_msg_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved3[0x00400]; /* -------------- */ -}; +}; /* User Access Region */ @@ -1939,7 +1939,7 @@ struct arbelprm_uar_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved1[0x03ec0]; /* -------------- */ -}; +}; /* Receive doorbell */ @@ -1953,7 +1953,7 @@ struct arbelprm_receive_doorbell_st { /* Little Endian */ pseudo_bit_t reserved3[0x00002]; pseudo_bit_t qpn[0x00018]; /* QP number or SRQ number this doorbell is rung on */ /* -------------- */ -}; +}; /* SET_IB Parameters */ @@ -1974,7 +1974,7 @@ struct arbelprm_set_ib_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved2[0x00180]; /* -------------- */ -}; +}; /* Multicast Group Member */ @@ -2014,7 +2014,7 @@ struct arbelprm_mgm_entry_st { /* Little Endian */ /* -------------- */ struct arbelprm_mgmqp_st mgmqp_7; /* Multicast Group Member QP */ /* -------------- */ -}; +}; /* INIT_IB Parameters */ @@ -2068,7 +2068,7 @@ struct arbelprm_init_ib_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved5[0x006c0]; /* -------------- */ -}; +}; /* Query Device Limitations */ @@ -2285,7 +2285,7 @@ struct arbelprm_query_dev_lim_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved41[0x002c0]; /* -------------- */ -}; +}; /* QUERY_ADAPTER Parameters Block */ @@ -2299,7 +2299,7 @@ struct arbelprm_query_adapter_st { /* Little Endian */ /* -------------- */ struct arbelprm_vsd_st vsd; /* -------------- */ -}; +}; /* QUERY_FW Parameters Block */ @@ -2375,7 +2375,7 @@ struct arbelprm_query_fw_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved6[0x004c0]; /* -------------- */ -}; +}; /* ACCESS_LAM */ @@ -2384,7 +2384,7 @@ struct arbelprm_access_lam_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved0[0x00080]; /* -------------- */ -}; +}; /* ENABLE_LAM Parameters Block */ @@ -2418,7 +2418,7 @@ struct arbelprm_enable_lam_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved2[0x00400]; /* -------------- */ 
-}; +}; /* Memory Access Parameters for UD Address Vector Table */ @@ -2430,7 +2430,7 @@ struct arbelprm_udavtable_memory_parameters_st { /* Little Endian */ pseudo_bit_t xlation_en[0x00001]; /* When cleared, address is physical address and no translation will be done. When set, address is virtual. */ pseudo_bit_t reserved1[0x00002]; /* -------------- */ -}; +}; /* INIT_HCA & QUERY_HCA Parameters Block */ @@ -2495,7 +2495,7 @@ struct arbelprm_init_hca_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved11[0x00600]; /* -------------- */ -}; +}; /* Event Queue Context Table Entry */ @@ -2555,7 +2555,7 @@ struct arbelprm_eqc_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved9[0x00080]; /* -------------- */ -}; +}; /* Memory Translation Table (MTT) Entry */ @@ -2566,7 +2566,7 @@ struct arbelprm_mtt_st { /* Little Endian */ pseudo_bit_t reserved0[0x0000b]; pseudo_bit_t ptag_l[0x00014]; /* Low-order bits of Physical tag. The size of the field depends on the page size of the region. Maximum PTAG size is 52 bits. */ /* -------------- */ -}; +}; /* Memory Protection Table (MPT) Entry */ @@ -2641,7 +2641,7 @@ struct arbelprm_mpt_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved8[0x00040]; /* -------------- */ -}; +}; /* Completion Queue Context Table Entry */ @@ -2719,7 +2719,7 @@ struct arbelprm_completion_queue_context_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved8[0x00020]; /* -------------- */ -}; +}; /* GPIO_event_data */ @@ -2732,7 +2732,7 @@ struct arbelprm_gpio_event_data_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved1[0x00020]; /* -------------- */ -}; +}; /* Event_data Field - QP/EE Events */ @@ -2749,7 +2749,7 @@ struct arbelprm_qp_ee_event_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved4[0x00060]; /* -------------- */ -}; +}; /* InfiniHost-III-EX Type0 Configuration Header */ @@ -2947,7 +2947,7 @@ struct arbelprm_mt25208_type0_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved13[0x006a0]; /* -------------- */ -}; +}; /* Event Data Field - Performance Monitor */ @@ -2965,7 +2965,7 @@ struct arbelprm_performance_monitor_event_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved1[0x00040]; /* -------------- */ -}; +}; /* Event_data Field - Page Faults */ @@ -2989,7 +2989,7 @@ struct arbelprm_page_fault_event_data_st { /* Little Endian */ /* -------------- */ pseudo_bit_t prefetch_len[0x00020]; /* Indicates how many subsequent pages in the same memory region/window will be accessed by the following transaction after this page fault is resolved. measured in bytes. SW can use this information in order to page-in the subsequent pages if they are not present. 
*/ /* -------------- */ -}; +}; /* WQE segments format */ @@ -3006,7 +3006,7 @@ struct arbelprm_wqe_segment_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved2[0x00080]; /* -------------- */ -}; +}; /* Event_data Field - Port State Change */ @@ -3019,7 +3019,7 @@ struct arbelprm_port_state_change_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved3[0x00060]; /* -------------- */ -}; +}; /* Event_data Field - Completion Queue Error */ @@ -3036,7 +3036,7 @@ struct arbelprm_completion_queue_error_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved3[0x00060]; /* -------------- */ -}; +}; /* Event_data Field - Completion Event */ @@ -3046,7 +3046,7 @@ struct arbelprm_completion_event_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved1[0x000a0]; /* -------------- */ -}; +}; /* Event Queue Entry */ @@ -3065,7 +3065,7 @@ struct arbelprm_event_queue_entry_st { /* Little Endian */ 1 HW */ pseudo_bit_t reserved3[0x00018]; /* -------------- */ -}; +}; /* QP/EE State Transitions Command Parameters */ @@ -3078,7 +3078,7 @@ struct arbelprm_qp_ee_state_transitions_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved1[0x009c0]; /* -------------- */ -}; +}; /* Completion Queue Entry Format */ @@ -3131,7 +3131,7 @@ struct arbelprm_completion_queue_entry_st { /* Little Endian */ 0xFE - For completion with error on Receive Queues 0xFF - For completion with error on Send Queues */ /* -------------- */ -}; +}; /* */ @@ -3152,7 +3152,7 @@ struct arbelprm_ecc_detect_event_data_st { /* Little Endian */ pseudo_bit_t err_ra[0x00010]; pseudo_bit_t err_ca[0x00010]; /* -------------- */ -}; +}; /* Event_data Field - ECC Detection Event */ @@ -3177,7 +3177,7 @@ struct arbelprm_scrubbing_event_st { /* Little Endian */ pseudo_bit_t err_ra[0x00010]; /* Error row address */ pseudo_bit_t err_ca[0x00010]; /* Error column address */ /* -------------- */ -}; +}; /* Miscellaneous Counters */ @@ -3186,28 +3186,28 @@ struct arbelprm_misc_counters_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved0[0x007e0]; /* -------------- */ -}; +}; /* LAM_EN Output Parameter */ struct arbelprm_lam_en_out_param_st { /* Little Endian */ pseudo_bit_t reserved0[0x00040]; /* -------------- */ -}; +}; /* Extended_Completion_Queue_Entry */ struct arbelprm_extended_completion_queue_entry_st { /* Little Endian */ pseudo_bit_t reserved0[0x00020]; /* -------------- */ -}; +}; /* */ struct arbelprm_eq_cmd_doorbell_st { /* Little Endian */ pseudo_bit_t reserved0[0x00020]; /* -------------- */ -}; +}; /* 0 */ @@ -3456,5 +3456,5 @@ struct arbelprm_arbel_prm_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved59[0xffcfc0]; /* -------------- */ -}; +}; #endif /* H_prefix_arbelprm_bits_fixnames_MT25218_PRM_csp_H */ diff --git a/src/drivers/infiniband/MT25408_PRM.h b/src/drivers/infiniband/MT25408_PRM.h index cc248daf9..f8c19ce72 100644 --- a/src/drivers/infiniband/MT25408_PRM.h +++ b/src/drivers/infiniband/MT25408_PRM.h @@ -47,7 +47,7 @@ struct hermonprm_ud_address_vector_st { /* Little Endian */ pseudo_bit_t reserved1[0x00008]; /* -------------- */ pseudo_bit_t hop_limit[0x00008]; /* IPv6 hop limit */ - pseudo_bit_t max_stat_rate[0x00004];/* Maximum static rate control. + pseudo_bit_t max_stat_rate[0x00004];/* Maximum static rate control. 
0 - 4X injection rate 1 - 1X injection rate other - reserved @@ -72,7 +72,7 @@ struct hermonprm_ud_address_vector_st { /* Little Endian */ /* -------------- */ pseudo_bit_t rgid_31_0[0x00020]; /* Remote GID[31:0] if G bit is set. Must be set to 0x2 if G bit is cleared. */ /* -------------- */ -}; +}; /* Send doorbell */ @@ -87,7 +87,7 @@ struct hermonprm_send_doorbell_st { /* Little Endian */ pseudo_bit_t reserved1[0x00002]; pseudo_bit_t qpn[0x00018]; /* QP number this doorbell is rung on */ /* -------------- */ -}; +}; /* Send wqe segment data inline */ @@ -101,7 +101,7 @@ struct hermonprm_wqe_segment_data_inline_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved2[0x00040]; /* -------------- */ -}; +}; /* Send wqe segment data ptr */ @@ -115,7 +115,7 @@ struct hermonprm_wqe_segment_data_ptr_st { /* Little Endian */ /* -------------- */ pseudo_bit_t local_address_l[0x00020]; /* -------------- */ -}; +}; /* Send wqe segment rd */ @@ -127,7 +127,7 @@ struct hermonprm_local_invalidate_segment_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved2[0x000a0]; /* -------------- */ -}; +}; /* Fast_Registration_Segment ####michal - doesn't match PRM (fields were added, see below) new table size in bytes - 0x30 */ @@ -157,7 +157,7 @@ struct hermonprm_fast_registration_segment_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reg_len_l[0x00020]; /* Region Length[31:0] */ /* -------------- */ -}; +}; /* Send wqe segment atomic */ @@ -170,7 +170,7 @@ struct hermonprm_wqe_segment_atomic_st { /* Little Endian */ /* -------------- */ pseudo_bit_t compare_l[0x00020]; /* -------------- */ -}; +}; /* Send wqe segment remote address */ @@ -183,7 +183,7 @@ struct hermonprm_wqe_segment_remote_address_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved0[0x00020]; /* -------------- */ -}; +}; /* end wqe segment bind */ @@ -213,7 +213,7 @@ struct hermonprm_wqe_segment_bind_st { /* Little Endian */ /* -------------- */ pseudo_bit_t length_l[0x00020]; /* -------------- */ -}; +}; /* Send wqe segment ud */ @@ -227,7 +227,7 @@ struct hermonprm_wqe_segment_ud_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved1[0x00040]; /* -------------- */ -}; +}; /* Send wqe segment rd */ @@ -239,7 +239,7 @@ struct hermonprm_wqe_segment_rd_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved1[0x00040]; /* -------------- */ -}; +}; /* Send wqe segment ctrl */ @@ -263,7 +263,7 @@ struct hermonprm_wqe_segment_ctrl_send_st { /* Little Endian */ /* -------------- */ pseudo_bit_t immediate[0x00020]; /* If the OpCode encodes an operation with Immediate (RDMA-write/SEND), This field will hold the Immediate data to be sent. If the OpCode encodes send and invalidate operations, this field holds the Invalidation key to be inserted into the packet; otherwise, this field is reserved. */ /* -------------- */ -}; +}; /* Address Path # ###michal - match to PRM */ @@ -280,19 +280,19 @@ struct hermonprm_address_path_st { /* Little Endian */ pseudo_bit_t reserved1[0x00008]; /* -------------- */ pseudo_bit_t hop_limit[0x00008]; /* IPv6 hop limit */ - pseudo_bit_t max_stat_rate[0x00004];/* Maximum static rate control. - 0 - 100% injection rate + pseudo_bit_t max_stat_rate[0x00004];/* Maximum static rate control. + 0 - 100% injection rate 1 - 25% injection rate 2 - 12.5% injection rate 3 - 50% injection rate - 7: 2.5 Gb/s. - 8: 10 Gb/s. - 9: 30 Gb/s. - 10: 5 Gb/s. + 7: 2.5 Gb/s. + 8: 10 Gb/s. + 9: 30 Gb/s. + 10: 5 Gb/s. 11: 20 Gb/s. - 12: 40 Gb/s. - 13: 60 Gb/s. 
- 14: 80 Gb/s. + 12: 40 Gb/s. + 13: 60 Gb/s. + 14: 80 Gb/s. 15: 120 Gb/s. */ pseudo_bit_t reserved2[0x00004]; pseudo_bit_t mgid_index[0x00007]; /* Index to port GID table */ @@ -328,7 +328,7 @@ struct hermonprm_address_path_st { /* Little Endian */ /* -------------- */ pseudo_bit_t dmac_31_0[0x00020]; /* -------------- */ -}; +}; /* HCA Command Register (HCR) #### michal - match PRM */ @@ -359,7 +359,7 @@ struct hermonprm_hca_command_register_st { /* Little Endian */ pseudo_bit_t status[0x00008]; /* Command execution status report. Valid only if command interface in under SW ownership (Go bit is cleared) 0 - command completed without error. If different than zero, command execution completed with error. Syndrom encoding is depended on command executed and is defined for each command */ /* -------------- */ -}; +}; /* CQ Doorbell */ @@ -373,14 +373,14 @@ struct hermonprm_cq_cmd_doorbell_st { /* Little Endian */ Other - Reserved */ pseudo_bit_t reserved0[0x00001]; pseudo_bit_t cmd_sn[0x00002]; /* Command Sequence Number - This field should be incremented upon receiving completion notification of the respective CQ. - This transition is done by ringing Request notification for next Solicited, Request notification for next Solicited or Unsolicited + This transition is done by ringing Request notification for next Solicited, Request notification for next Solicited or Unsolicited completion or Request notification for multiple completions doorbells after receiving completion notification. This field is initialized to Zero */ pseudo_bit_t reserved1[0x00002]; /* -------------- */ pseudo_bit_t cq_param[0x00020]; /* parameter to be used by CQ command */ /* -------------- */ -}; +}; /* RD-send doorbell */ @@ -394,7 +394,7 @@ struct hermonprm_rd_send_doorbell_st { /* Little Endian */ /* -------------- */ struct hermonprm_send_doorbell_st send_doorbell;/* Send Parameters */ /* -------------- */ -}; +}; /* Multicast Group Member QP #### michal - match PRM */ @@ -404,7 +404,7 @@ struct hermonprm_mgmqp_st { /* Little Endian */ pseudo_bit_t blck_lb[0x00001]; /* Block self-loopback messages arriving to this qp */ pseudo_bit_t qi[0x00001]; /* Qi: QPN_i is valid */ /* -------------- */ -}; +}; /* vsd */ @@ -521,7 +521,7 @@ struct hermonprm_vsd_st { /* Little Endian */ /* -------------- */ pseudo_bit_t vsd_dw55[0x00020]; /* -------------- */ -}; +}; /* UAR Parameters */ @@ -535,7 +535,7 @@ struct hermonprm_uar_params_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved2[0x000a0]; /* -------------- */ -}; +}; /* Translation and Protection Tables Parameters */ @@ -552,7 +552,7 @@ struct hermonprm_tptparams_st { /* Little Endian */ /* -------------- */ pseudo_bit_t log_dmpt_sz[0x00006]; /* Log (base 2) of the number of region/windows entries in the dMPT table. */ pseudo_bit_t reserved0[0x00002]; - pseudo_bit_t pfto[0x00005]; /* Page Fault RNR Timeout - + pseudo_bit_t pfto[0x00005]; /* Page Fault RNR Timeout - The field returned in RNR Naks generated when a page fault is detected. It has no effect when on-demand-paging is not used. */ pseudo_bit_t reserved1[0x00013]; @@ -575,7 +575,7 @@ struct hermonprm_tptparams_st { /* Little Endian */ Entry size is 64 bytes. Table must be aligned to its size. */ /* -------------- */ -}; +}; /* Multicast Support Parameters #### michal - match PRM */ @@ -584,14 +584,14 @@ struct hermonprm_multicastparam_st { /* Little Endian */ The base address must be aligned to the entry size. Address may be set to 0xFFFFFFFF if multicast is not supported. 
*/ /* -------------- */ - pseudo_bit_t mc_base_addr_l[0x00020];/* Base Address of the Multicast Table [31:0]. + pseudo_bit_t mc_base_addr_l[0x00020];/* Base Address of the Multicast Table [31:0]. The base address must be aligned to the entry size. Address may be set to 0xFFFFFFFF if multicast is not supported. */ /* -------------- */ pseudo_bit_t reserved0[0x00040]; /* -------------- */ pseudo_bit_t log_mc_table_entry_sz[0x00005];/* Log2 of the Size of multicast group member (MGM) entry. - Must be greater than 5 (to allow CTRL and GID sections). + Must be greater than 5 (to allow CTRL and GID sections). That implies the number of QPs per MC table entry. */ pseudo_bit_t reserved1[0x0000b]; pseudo_bit_t reserved2[0x00010]; @@ -611,7 +611,7 @@ struct hermonprm_multicastparam_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved6[0x00020]; /* -------------- */ -}; +}; /* QPC/EEC/CQC/EQC/RDB Parameters #### michal - doesn't match PRM (field name are differs. see below) */ @@ -679,12 +679,12 @@ struct hermonprm_qpcbaseaddr_st { /* Little Endian */ /* -------------- */ pseudo_bit_t log_num_rd[0x00003]; /* Log (base 2) of the maximum number of RdmaRdC entries per QP. This denotes the maximum number of outstanding reads/atomics as a responder. */ pseudo_bit_t reserved7[0x00002]; - pseudo_bit_t rdmardc_base_addr_l[0x0001b];/* rdmardc_base_addr_l: Base address of table that holds remote read and remote atomic requests [31:0]. + pseudo_bit_t rdmardc_base_addr_l[0x0001b];/* rdmardc_base_addr_l: Base address of table that holds remote read and remote atomic requests [31:0]. Table must be aligned to RDB entry size (32 bytes). */ /* -------------- */ pseudo_bit_t reserved8[0x00040]; /* -------------- */ -}; +}; /* Header_Log_Register */ @@ -693,7 +693,7 @@ struct hermonprm_header_log_register_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved0[0x00060]; /* -------------- */ -}; +}; /* Performance Monitors */ @@ -725,7 +725,7 @@ struct hermonprm_performance_monitors_st { /* Little Endian */ /* -------------- */ pseudo_bit_t event_counter2[0x00020];/* Read/write event counter, counting events specified by EvCntl and EvCnt2 fields repsectively. When the event counter reaches is maximum value of 0xFFFFFF, the next event will cause it to roll over to zero, set F1 or F2 bit respectively and generate interrupt by I1 I2 bit respectively. */ /* -------------- */ -}; +}; /* MLX WQE segment format */ @@ -751,7 +751,7 @@ struct hermonprm_wqe_segment_ctrl_mlx_st { /* Little Endian */ pseudo_bit_t reserved5[0x00010]; pseudo_bit_t rlid[0x00010]; /* Destination LID (must match given headers) */ /* -------------- */ -}; +}; /* Send WQE segment format */ @@ -780,7 +780,7 @@ struct hermonprm_send_wqe_segment_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved1[0x00200]; /* -------------- */ -}; +}; /* QP and EE Context Entry */ @@ -833,7 +833,7 @@ struct hermonprm_queue_pair_ee_context_entry_st { /* Little Endian */ 0x3 - 1024 0x4 - 2048 other - reserved - + Should be configured to 0x4 for UD and MLX QPs. */ /* -------------- */ pseudo_bit_t usr_page[0x00018]; /* UAR number to ring doorbells for this QP (aliased to doorbell and Blue Flame pages) */ @@ -883,7 +883,7 @@ struct hermonprm_queue_pair_ee_context_entry_st { /* Little Endian */ pseudo_bit_t reserved24[0x00008]; /* -------------- */ pseudo_bit_t reserved25[0x00004]; - pseudo_bit_t ric[0x00001]; /* Invalid Credits. + pseudo_bit_t ric[0x00001]; /* Invalid Credits. 1 - place "Invalid Credits" to ACKs sent from this queue. 
0 - ACKs report the actual number of end to end credits on the connection. Not valid (reserved) in EE context. @@ -895,12 +895,12 @@ struct hermonprm_queue_pair_ee_context_entry_st { /* Little Endian */ pseudo_bit_t rwe[0x00001]; /* If set - RDMA - write enabled on receive queue. Not valid (reserved) in EE context. */ pseudo_bit_t rre[0x00001]; /* If set - RDMA - read enabled on receive queue. Not valid (reserved) in EE context. */ pseudo_bit_t reserved28[0x00005]; - pseudo_bit_t rra_max[0x00003]; /* Maximum number of outstanding RDMA-read/Atomic operations allowed on receive queue is 2^RRA_Max. + pseudo_bit_t rra_max[0x00003]; /* Maximum number of outstanding RDMA-read/Atomic operations allowed on receive queue is 2^RRA_Max. Must be 0 for EE context. */ pseudo_bit_t physical_function[0x00008]; /* -------------- */ pseudo_bit_t next_rcv_psn[0x00018]; /* Next (expected) PSN on receive */ - pseudo_bit_t min_rnr_nak[0x00005]; /* Minimum RNR NAK timer value (TTTTT field encoding according to the IB spec Vol1 9.7.5.2.8). + pseudo_bit_t min_rnr_nak[0x00005]; /* Minimum RNR NAK timer value (TTTTT field encoding according to the IB spec Vol1 9.7.5.2.8). Not valid (reserved) in EE context. */ pseudo_bit_t reserved30[0x00003]; /* -------------- */ @@ -919,7 +919,7 @@ struct hermonprm_queue_pair_ee_context_entry_st { /* Little Endian */ On send datagrams, if Q_Key[31] specified in the WQE is set, then this Q_Key will be transmitted in the outgoing message. Not valid (reserved) in EE context. */ /* -------------- */ - pseudo_bit_t srqn[0x00018]; /* SRQN - Shared Receive Queue Number - specifies the SRQ number from which the QP dequeues receive descriptors. + pseudo_bit_t srqn[0x00018]; /* SRQN - Shared Receive Queue Number - specifies the SRQ number from which the QP dequeues receive descriptors. SRQN is valid only if SRQ bit is set. Not valid (reserved) in EE context. */ pseudo_bit_t srq[0x00001]; /* SRQ - Shared Receive Queue. If this bit is set, then the QP is associated with a SRQ. Not valid (reserved) in EE context. */ pseudo_bit_t reserved34[0x00007]; @@ -983,7 +983,7 @@ struct hermonprm_queue_pair_ee_context_entry_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved48[0x000c0]; /* -------------- */ -}; +}; /* */ @@ -993,21 +993,21 @@ struct hermonprm_mcg_qp_dw_st { /* Little Endian */ pseudo_bit_t blck_lb[0x00001]; pseudo_bit_t reserved1[0x00001]; /* -------------- */ -}; +}; /* Clear Interrupt [63:0] #### michal - match to PRM */ struct hermonprm_clr_int_st { /* Little Endian */ pseudo_bit_t clr_int_h[0x00020]; /* Clear Interrupt [63:32] - Write transactions to this register will clear (de-assert) the virtual interrupt output pins of InfiniHost-III-EX. The value to be written in this register is obtained by executing QUERY_ADAPTER command on command interface after system boot. + Write transactions to this register will clear (de-assert) the virtual interrupt output pins of InfiniHost-III-EX. The value to be written in this register is obtained by executing QUERY_ADAPTER command on command interface after system boot. This register is write-only. Reading from this register will cause undefined result */ /* -------------- */ pseudo_bit_t clr_int_l[0x00020]; /* Clear Interrupt [31:0] - Write transactions to this register will clear (de-assert) the virtual interrupt output pins of InfiniHost-III-EX. The value to be written in this register is obtained by executing QUERY_ADAPTER command on command interface after system boot. 
+ Write transactions to this register will clear (de-assert) the virtual interrupt output pins of InfiniHost-III-EX. The value to be written in this register is obtained by executing QUERY_ADAPTER command on command interface after system boot. This register is write-only. Reading from this register will cause undefined result */ /* -------------- */ -}; +}; /* EQ Set CI DBs Table */ @@ -1268,7 +1268,7 @@ struct hermonprm_eq_set_ci_table_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved63[0x00020]; /* -------------- */ -}; +}; /* InfiniHost-III-EX Configuration Registers #### michal - match to PRM */ @@ -1279,7 +1279,7 @@ struct hermonprm_configuration_registers_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved1[0x3fcb20]; /* -------------- */ -}; +}; /* QP_DB_Record ### michal = gdror fixed */ @@ -1287,7 +1287,7 @@ struct hermonprm_qp_db_record_st { /* Little Endian */ pseudo_bit_t receive_wqe_counter[0x00010];/* Modulo-64K counter of WQEs posted to the QP since its creation. Should be initialized to zero. */ pseudo_bit_t reserved0[0x00010]; /* -------------- */ -}; +}; /* CQ_ARM_DB_Record */ @@ -1303,7 +1303,7 @@ struct hermonprm_cq_arm_db_record_st { /* Little Endian */ pseudo_bit_t res[0x00003]; /* Must be 0x2 */ pseudo_bit_t cq_number[0x00018]; /* CQ number */ /* -------------- */ -}; +}; /* CQ_CI_DB_Record */ @@ -1314,7 +1314,7 @@ struct hermonprm_cq_ci_db_record_st { /* Little Endian */ pseudo_bit_t res[0x00003]; /* Must be 0x1 */ pseudo_bit_t cq_number[0x00018]; /* CQ number */ /* -------------- */ -}; +}; /* Virtual_Physical_Mapping */ @@ -1330,7 +1330,7 @@ struct hermonprm_virtual_physical_mapping_st { /* Little Endian */ pseudo_bit_t reserved1[0x00006]; pseudo_bit_t pa_l[0x00014]; /* Physical Address[31:12] */ /* -------------- */ -}; +}; /* MOD_STAT_CFG #### michal - gdror fix */ @@ -1518,7 +1518,7 @@ struct hermonprm_srq_context_st { /* Little Endian */ pseudo_bit_t reserved10[0x00002]; pseudo_bit_t db_record_addr_l[0x0001e];/* SRQ DB Record physical address [31:2] */ /* -------------- */ -}; +}; /* PBL */ @@ -1539,7 +1539,7 @@ struct hermonprm_pbl_st { /* Little Endian */ /* -------------- */ pseudo_bit_t mtt_3_l[0x00020]; /* Fourth MTT[31:0] */ /* -------------- */ -}; +}; /* Performance Counters #### michal - gdror fixed */ @@ -1554,7 +1554,7 @@ struct hermonprm_performance_counters_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved4[0x00620]; /* -------------- */ -}; +}; /* Transport and CI Error Counters */ @@ -1575,10 +1575,10 @@ struct hermonprm_transport_and_ci_error_counters_st { /* Little Endian */ /* -------------- */ pseudo_bit_t sq_num_lpe[0x00020]; /* Requester - number of local protection errors */ /* -------------- */ - pseudo_bit_t rq_num_wrfe[0x00020]; /* Responder - number of CQEs with error. + pseudo_bit_t rq_num_wrfe[0x00020]; /* Responder - number of CQEs with error. Incremented each time a CQE with error is generated */ /* -------------- */ - pseudo_bit_t sq_num_wrfe[0x00020]; /* Requester - number of CQEs with error. + pseudo_bit_t sq_num_wrfe[0x00020]; /* Requester - number of CQEs with error. 
Incremented each time a CQE with error is generated */ /* -------------- */ pseudo_bit_t reserved0[0x00020]; @@ -1698,7 +1698,7 @@ struct hermonprm_transport_and_ci_error_counters_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved12[0x002a0]; /* -------------- */ -}; +}; /* Event_data Field - HCR Completion Event #### michal - match PRM */ @@ -1717,7 +1717,7 @@ struct hermonprm_hcr_completion_event_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved3[0x00020]; /* -------------- */ -}; +}; /* Completion with Error CQE #### michal - gdror fixed */ @@ -1732,7 +1732,7 @@ struct hermonprm_completion_with_error_st { /* Little Endian */ 0x02 - Local QP Operation Error 0x03 - Local EE Context Operation Error 0x04 - Local Protection Error - 0x05 - Work Request Flushed Error + 0x05 - Work Request Flushed Error 0x06 - Memory Window Bind Error 0x10 - Bad Response Error 0x11 - Local Access Error @@ -1752,7 +1752,7 @@ struct hermonprm_completion_with_error_st { /* Little Endian */ pseudo_bit_t wqe_counter[0x00010]; /* -------------- */ pseudo_bit_t opcode[0x00005]; /* The opcode of WQE completion is reported for. - + The following values are reported in case of completion with error: 0xFE - For completion with error on Receive Queues 0xFF - For completion with error on Send Queues */ @@ -1761,7 +1761,7 @@ struct hermonprm_completion_with_error_st { /* Little Endian */ pseudo_bit_t owner[0x00001]; /* HW Flips this bit for every CQ warp around. Initialized to Zero. */ pseudo_bit_t reserved3[0x00018]; /* -------------- */ -}; +}; /* Resize CQ Input Mailbox */ @@ -1790,7 +1790,7 @@ struct hermonprm_resize_cq_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved10[0x00100]; /* -------------- */ -}; +}; /* MAD_IFC Input Modifier */ @@ -1802,14 +1802,14 @@ struct hermonprm_mad_ifc_input_modifier_st { /* Little Endian */ pseudo_bit_t rlid[0x00010]; /* Remote (source) LID from the received MAD. This field is required for trap generation upon MKey/BKey validation. */ /* -------------- */ -}; +}; /* MAD_IFC Input Mailbox ###michal -gdror fixed */ struct hermonprm_mad_ifc_st { /* Little Endian */ pseudo_bit_t request_mad_packet[64][0x00020];/* Request MAD Packet (256bytes) */ /* -------------- */ - pseudo_bit_t my_qpn[0x00018]; /* Destination QP number from the received MAD. + pseudo_bit_t my_qpn[0x00018]; /* Destination QP number from the received MAD. This field is reserved if Mad_extended_info indication in the input modifier is clear. */ pseudo_bit_t reserved0[0x00008]; /* -------------- */ @@ -1822,25 +1822,25 @@ struct hermonprm_mad_ifc_st { /* Little Endian */ pseudo_bit_t reserved3[0x00010]; pseudo_bit_t ml_path[0x00007]; /* My (destination) LID path bits from the received MAD. This field is reserved if Mad_extended_info indication in the input modifier is clear. */ - pseudo_bit_t g[0x00001]; /* If set, the GRH field in valid. + pseudo_bit_t g[0x00001]; /* If set, the GRH field in valid. This field is reserved if Mad_extended_info indication in the input modifier is clear. */ pseudo_bit_t reserved4[0x00004]; pseudo_bit_t sl[0x00004]; /* Service Level of the received MAD. This field is reserved if Mad_extended_info indication in the input modifier is clear. */ /* -------------- */ - pseudo_bit_t pkey_indx[0x00010]; /* Index in PKey table that matches PKey of the received MAD. + pseudo_bit_t pkey_indx[0x00010]; /* Index in PKey table that matches PKey of the received MAD. This field is reserved if Mad_extended_info indication in the input modifier is clear. 
*/ pseudo_bit_t reserved5[0x00010]; /* -------------- */ pseudo_bit_t reserved6[0x00160]; /* -------------- */ - pseudo_bit_t grh[10][0x00020]; /* The GRH field of the MAD packet that was scattered to the first 40 bytes pointed to by the scatter list. - Valid if Mad_extended_info bit (in the input modifier) and g bit are set. + pseudo_bit_t grh[10][0x00020]; /* The GRH field of the MAD packet that was scattered to the first 40 bytes pointed to by the scatter list. + Valid if Mad_extended_info bit (in the input modifier) and g bit are set. Otherwise this field is reserved. */ /* -------------- */ pseudo_bit_t reserved7[0x004c0]; /* -------------- */ -}; +}; /* Query Debug Message #### michal - gdror fixed */ @@ -1899,7 +1899,7 @@ struct hermonprm_query_debug_msg_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved4[0x003c0]; /* -------------- */ -}; +}; /* User Access Region */ @@ -1914,7 +1914,7 @@ struct hermonprm_uar_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved1[0x03ec0]; /* -------------- */ -}; +}; /* Receive doorbell */ @@ -1928,7 +1928,7 @@ struct hermonprm_receive_doorbell_st { /* Little Endian */ pseudo_bit_t reserved3[0x00002]; pseudo_bit_t qpn[0x00018]; /* QP number or SRQ number this doorbell is rung on */ /* -------------- */ -}; +}; /* SET_IB Parameters */ @@ -1949,7 +1949,7 @@ struct hermonprm_set_ib_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved2[0x00180]; /* -------------- */ -}; +}; /* Multicast Group Member #### michal - gdror fixed */ @@ -1989,7 +1989,7 @@ struct hermonprm_mgm_entry_st { /* Little Endian */ /* -------------- */ struct hermonprm_mgmqp_st mgmqp_7; /* Multicast Group Member QP */ /* -------------- */ -}; +}; /* INIT_PORT Parameters #### michal - match PRM */ @@ -2041,7 +2041,7 @@ struct hermonprm_init_port_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved5[0x006c0]; /* -------------- */ -}; +}; /* Query Device Capablities #### michal - gdror fixed */ @@ -2267,7 +2267,7 @@ struct hermonprm_query_dev_cap_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved46[0x002c0]; /* -------------- */ -}; +}; /* QUERY_ADAPTER Parameters Block #### michal - gdror fixed */ @@ -2281,7 +2281,7 @@ struct hermonprm_query_adapter_st { /* Little Endian */ /* -------------- */ struct hermonprm_vsd_st vsd; /* ###michal- this field was replaced by 2 fields : vsd .1664; vsd(continued/psid .128; */ /* -------------- */ -}; +}; /* QUERY_FW Parameters Block #### michal - doesn't match PRM */ @@ -2298,7 +2298,7 @@ struct hermonprm_query_fw_st { /* Little Endian */ pseudo_bit_t log_max_outstanding_cmd[0x00008];/* Log2 of the maximum number of commands the HCR can support simultaneously */ pseudo_bit_t reserved1[0x00017]; pseudo_bit_t dt[0x00001]; /* Debug Trace Support - 0 - Debug trace is not supported + 0 - Debug trace is not supported 1 - Debug trace is supported */ /* -------------- */ pseudo_bit_t reserved2[0x00001]; @@ -2346,7 +2346,7 @@ struct hermonprm_query_fw_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved8[0x00600]; /* -------------- */ -}; +}; /* Memory Access Parameters for UD Address Vector Table */ @@ -2358,7 +2358,7 @@ struct hermonprm_udavtable_memory_parameters_st { /* Little Endian */ pseudo_bit_t xlation_en[0x00001]; /* When cleared, address is physical address and no translation will be done. When set, address is virtual. 
*/ pseudo_bit_t reserved1[0x00002]; /* -------------- */ -}; +}; /* INIT_HCA & QUERY_HCA Parameters Block ####michal-doesn't match PRM (see differs below) new size in bytes:0x300 */ @@ -2407,7 +2407,7 @@ struct hermonprm_init_hca_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved10[0x00600]; /* -------------- */ -}; +}; /* Event Queue Context Table Entry #### michal - gdror fixed */ @@ -2454,19 +2454,19 @@ struct hermonprm_eqc_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved12[0x00040]; /* -------------- */ - pseudo_bit_t consumer_counter[0x00018];/* Consumer counter. The counter is incremented for each EQE polled from the EQ. - Must be 0x0 in EQ initialization. + pseudo_bit_t consumer_counter[0x00018];/* Consumer counter. The counter is incremented for each EQE polled from the EQ. + Must be 0x0 in EQ initialization. Maintained by HW (valid for the QUERY_EQ command only). */ pseudo_bit_t reserved13[0x00008]; /* -------------- */ - pseudo_bit_t producer_counter[0x00018];/* Producer Coutner. The counter is incremented for each EQE that is written by the HW to the EQ. + pseudo_bit_t producer_counter[0x00018];/* Producer Coutner. The counter is incremented for each EQE that is written by the HW to the EQ. EQ overrun is reported if Producer_counter + 1 equals to Consumer_counter and a EQE needs to be added. Maintained by HW (valid for the QUERY_EQ command only) */ pseudo_bit_t reserved14[0x00008]; /* -------------- */ pseudo_bit_t reserved15[0x00080]; /* -------------- */ -}; +}; /* Memory Translation Table (MTT) Entry #### michal - match to PRM */ @@ -2477,7 +2477,7 @@ struct hermonprm_mtt_st { /* Little Endian */ pseudo_bit_t reserved0[0x00002]; pseudo_bit_t ptag_l[0x0001d]; /* Low-order bits of Physical tag. The size of the field depends on the page size of the region. Maximum PTAG size is 52 bits. */ /* -------------- */ -}; +}; /* Memory Protection Table (MPT) Entry ### doesn't match PRM (new fields were added). new size in bytes : 0x54 */ @@ -2547,7 +2547,7 @@ struct hermonprm_mpt_st { /* Little Endian */ pseudo_bit_t mtt_fbo[0x00015]; /* First byte offset in the zero-based region - the first byte within the first block/page start address refers to. When mtt_rep is being used, fbo points within the replicated block (i.e. block-size x 2^mtt_rep) */ pseudo_bit_t reserved10[0x0000b]; /* -------------- */ -}; +}; /* Completion Queue Context Table Entry #### michal - match PRM */ @@ -2559,7 +2559,7 @@ struct hermonprm_completion_queue_context_st { /* Little Endian */ 0x6 - ARMED SOLICITED (Request Solicited Notification) 0xA - FIRED other - reserved - + Must be 0x0 in CQ initialization. Valid for the QUERY_CQ and HW2SW_CQ commands only. */ pseudo_bit_t reserved1[0x00005]; @@ -2605,7 +2605,7 @@ struct hermonprm_completion_queue_context_st { /* Little Endian */ pseudo_bit_t reserved11[0x00008]; /* -------------- */ pseudo_bit_t solicit_producer_indx[0x00018];/* Maintained by HW. - Valid for QUERY_CQ and HW2SW_CQ commands only. + Valid for QUERY_CQ and HW2SW_CQ commands only. 
*/ pseudo_bit_t reserved12[0x00008]; /* -------------- */ @@ -2627,7 +2627,7 @@ struct hermonprm_completion_queue_context_st { /* Little Endian */ pseudo_bit_t reserved17[0x00003]; pseudo_bit_t db_record_addr_l[0x0001d];/* CQ DB Record physical address [31:3] */ /* -------------- */ -}; +}; /* GPIO_event_data #### michal - gdror fixed */ @@ -2640,7 +2640,7 @@ struct hermonprm_gpio_event_data_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved1[0x00020]; /* -------------- */ -}; +}; /* Event_data Field - QP/EE Events #### michal - doesn't match PRM */ @@ -2657,7 +2657,7 @@ struct hermonprm_qp_ee_event_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved4[0x00060]; /* -------------- */ -}; +}; /* InfiniHost-III-EX Type0 Configuration Header ####michal - doesn't match PRM (new fields added, see below) */ @@ -2803,19 +2803,19 @@ struct hermonprm_mt25208_type0_st { /* Little Endian */ /* -------------- */ pseudo_bit_t uncorrectable_error_status_register[0x00020];/* 0 Training Error Status 4 Data Link Protocol Error Status - 12 Poisoned TLP Status - 13 Flow Control Protocol Error Status - 14 Completion Timeout Status - 15 Completer Abort Status - 16 Unexpected Completion Status - 17 Receiver Overflow Status - 18 Malformed TLP Status - 19 ECRC Error Status + 12 Poisoned TLP Status + 13 Flow Control Protocol Error Status + 14 Completion Timeout Status + 15 Completer Abort Status + 16 Unexpected Completion Status + 17 Receiver Overflow Status + 18 Malformed TLP Status + 19 ECRC Error Status 20 Unsupported Request Error Status */ /* -------------- */ pseudo_bit_t uncorrectable_error_mask_register[0x00020];/* 0 Training Error Mask 4 Data Link Protocol Error Mask - 12 Poisoned TLP Mask + 12 Poisoned TLP Mask 13 Flow Control Protocol Error Mask 14 Completion Timeout Mask 15 Completer Abort Mask @@ -2855,7 +2855,7 @@ struct hermonprm_mt25208_type0_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved13[0x006a0]; /* -------------- */ -}; +}; /* Event Data Field - Performance Monitor */ @@ -2873,7 +2873,7 @@ struct hermonprm_performance_monitor_event_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved1[0x00040]; /* -------------- */ -}; +}; /* Event_data Field - Page Faults */ @@ -2897,7 +2897,7 @@ struct hermonprm_page_fault_event_data_st { /* Little Endian */ /* -------------- */ pseudo_bit_t prefetch_len[0x00020]; /* Indicates how many subsequent pages in the same memory region/window will be accessed by the following transaction after this page fault is resolved. measured in bytes. SW can use this information in order to page-in the subsequent pages if they are not present. 
*/ /* -------------- */ -}; +}; /* WQE segments format */ @@ -2914,7 +2914,7 @@ struct hermonprm_wqe_segment_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved2[0x00080]; /* -------------- */ -}; +}; /* Event_data Field - Port State Change #### michal - match PRM */ @@ -2927,7 +2927,7 @@ struct hermonprm_port_state_change_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved3[0x00060]; /* -------------- */ -}; +}; /* Event_data Field - Completion Queue Error #### michal - match PRM */ @@ -2944,7 +2944,7 @@ struct hermonprm_completion_queue_error_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved3[0x00060]; /* -------------- */ -}; +}; /* Event_data Field - Completion Event #### michal - match PRM */ @@ -2954,12 +2954,12 @@ struct hermonprm_completion_event_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved1[0x000a0]; /* -------------- */ -}; +}; /* Event Queue Entry #### michal - match to PRM */ struct hermonprm_event_queue_entry_st { /* Little Endian */ - pseudo_bit_t event_sub_type[0x00008];/* Event Sub Type. + pseudo_bit_t event_sub_type[0x00008];/* Event Sub Type. Defined for events which have sub types, zero elsewhere. */ pseudo_bit_t reserved0[0x00008]; pseudo_bit_t event_type[0x00008]; /* Event Type */ @@ -2968,12 +2968,12 @@ struct hermonprm_event_queue_entry_st { /* Little Endian */ pseudo_bit_t event_data[6][0x00020];/* Delivers auxilary data to handle event. */ /* -------------- */ pseudo_bit_t reserved2[0x00007]; - pseudo_bit_t owner[0x00001]; /* Owner of the entry - 0 SW + pseudo_bit_t owner[0x00001]; /* Owner of the entry + 0 SW 1 HW */ pseudo_bit_t reserved3[0x00018]; /* -------------- */ -}; +}; /* QP/EE State Transitions Command Parameters ###michal - doesn't match PRM (field name changed) */ @@ -2986,7 +2986,7 @@ struct hermonprm_qp_ee_state_transitions_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved1[0x00800]; /* -------------- */ -}; +}; /* Completion Queue Entry Format #### michal - fixed by gdror */ @@ -3010,7 +3010,7 @@ struct hermonprm_completion_queue_entry_st { /* Little Endian */ For IPoIB (UD) and RawEth CQEs this field contains the RSS hash function value. Otherwise, this field is reserved. */ /* -------------- */ - pseudo_bit_t srq_rqpn[0x00018]; /* For Responder UD QPs, Remote (source) QP number. + pseudo_bit_t srq_rqpn[0x00018]; /* For Responder UD QPs, Remote (source) QP number. For Responder SRC QPs, SRQ number. Otherwise, this field is reserved. */ pseudo_bit_t ml_path_mac_index[0x00007];/* For responder UD over IB CQE: These are the lower LMC bits of the DLID in an incoming UD packet, higher bits of this field, that are not part of the LMC bits are zeroed by HW. Invalid if incoming message DLID is the permissive LID or incoming message is multicast. @@ -3028,17 +3028,17 @@ struct hermonprm_completion_queue_entry_st { /* Little Endian */ For responder UD over Ethernet and RawEth - it is VLAN-header[15:12] Otherwise, this field is reserved. */ /* -------------- */ - pseudo_bit_t smac31_0_rawether_ipoib_status[0x00020];/* For responder UD over Ethernet - source MAC[31:0] of the packet. - For responder RawEth and UD over IB - RawEth-IPoIB status {3 reserved, ipok,udp,tcp,ipv4opt,ipv6,ipv4vf,ipv4,rht(6),ipv6extmask(6),reserved(2),l2am,reserved(2),bfcs,reserved(2),enc} + pseudo_bit_t smac31_0_rawether_ipoib_status[0x00020];/* For responder UD over Ethernet - source MAC[31:0] of the packet. 
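
(Illustrative aside, not part of the patch: the pseudo_bit_t declarations above only record bit offsets and widths; the drivers never touch them directly but go through the MLX_GET/MLX_FILL accessors seen in the arbel.c and hermon.c hunks below. A minimal sketch, assuming the completion-entry union wrapper implied by the later "len = MLX_GET ( &cqe->normal, byte_cnt )" call — the union type name is an assumption, not confirmed by this diff:)

/* Sketch only: read the 32-bit byte_cnt and 5-bit opcode fields of a
 * normal completion queue entry via the pseudo-bit accessors.
 */
static void example_parse_cqe ( union hermonprm_completion_entry *cqe ) {
        size_t len;
        unsigned int opcode;

        len = MLX_GET ( &cqe->normal, byte_cnt );   /* pseudo_bit_t byte_cnt[0x00020] */
        opcode = MLX_GET ( &cqe->normal, opcode );  /* pseudo_bit_t opcode[0x00005]   */
        ( void ) len;
        ( void ) opcode;
}
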
+ For responder RawEth and UD over IB - RawEth-IPoIB status {3 reserved, ipok,udp,tcp,ipv4opt,ipv6,ipv4vf,ipv4,rht(6),ipv6extmask(6),reserved(2),l2am,reserved(2),bfcs,reserved(2),enc} Otherwise, this field is reserved. */ /* -------------- */ - pseudo_bit_t byte_cnt[0x00020]; /* Byte count of data transferred. Applicable for RDMA-read, Atomic and all receive operations. completions. + pseudo_bit_t byte_cnt[0x00020]; /* Byte count of data transferred. Applicable for RDMA-read, Atomic and all receive operations. completions. For Receive Queue that is subject for headers. separation, byte_cnt[31:24] specify number of bytes scattered to the first scatter entry (headers. length). Byte_cnt[23:0] specify total byte count received (including headers). */ /* -------------- */ pseudo_bit_t checksum[0x00010]; /* Valid for RawEth and IPoIB only. */ pseudo_bit_t wqe_counter[0x00010]; /* -------------- */ - pseudo_bit_t opcode[0x00005]; /* Send completions - same encoding as WQE. + pseudo_bit_t opcode[0x00005]; /* Send completions - same encoding as WQE. Error coding is 0x1F Receive: 0x0 - RDMA-Write with Immediate @@ -3052,14 +3052,14 @@ struct hermonprm_completion_queue_entry_st { /* Little Endian */ pseudo_bit_t reserved1[0x00010]; pseudo_bit_t reserved2[0x00008]; /* -------------- */ -}; +}; /* */ struct hermonprm_mcg_qps_st { /* Little Endian */ struct hermonprm_mcg_qp_dw_st dw[128]; /* -------------- */ -}; +}; /* */ @@ -3084,7 +3084,7 @@ struct hermonprm_mcg_hdr_st { /* Little Endian */ /* -------------- */ pseudo_bit_t gid0[0x00020]; /* -------------- */ -}; +}; /* */ @@ -3096,7 +3096,7 @@ struct hermonprm_sched_queue_context_st { /* Little Endian */ pseudo_bit_t reserved0[0x00006]; pseudo_bit_t weight[0x00010]; /* Weight of this SchQ */ /* -------------- */ -}; +}; /* */ @@ -3117,7 +3117,7 @@ struct hermonprm_ecc_detect_event_data_st { /* Little Endian */ pseudo_bit_t err_ra[0x00010]; pseudo_bit_t err_ca[0x00010]; /* -------------- */ -}; +}; /* Event_data Field - ECC Detection Event */ @@ -3142,14 +3142,14 @@ struct hermonprm_scrubbing_event_st { /* Little Endian */ pseudo_bit_t err_ra[0x00010]; /* Error row address */ pseudo_bit_t err_ca[0x00010]; /* Error column address */ /* -------------- */ -}; +}; /* */ struct hermonprm_eq_cmd_doorbell_st { /* Little Endian */ pseudo_bit_t reserved0[0x00020]; /* -------------- */ -}; +}; /* 0 */ @@ -3400,5 +3400,5 @@ struct hermonprm_hermon_prm_st { /* Little Endian */ /* -------------- */ pseudo_bit_t reserved64[0xffcfc0]; /* -------------- */ -}; +}; #endif /* H_prefix_hermonprm_bits_fixnames_MT25408_PRM_csp_H */ diff --git a/src/drivers/infiniband/arbel.c b/src/drivers/infiniband/arbel.c index 98a2b6010..fbef3f8a6 100644 --- a/src/drivers/infiniband/arbel.c +++ b/src/drivers/infiniband/arbel.c @@ -639,8 +639,8 @@ static int arbel_create_cq ( struct ib_device *ibdev, /* Allocate completion queue itself */ arbel_cq->cqe_size = ( cq->num_cqes * sizeof ( arbel_cq->cqe[0] ) ); - arbel_cq->cqe = malloc_dma ( arbel_cq->cqe_size, - sizeof ( arbel_cq->cqe[0] ) ); + arbel_cq->cqe = malloc_phys ( arbel_cq->cqe_size, + sizeof ( arbel_cq->cqe[0] ) ); if ( ! 
arbel_cq->cqe ) { rc = -ENOMEM; goto err_cqe; @@ -697,7 +697,7 @@ static int arbel_create_cq ( struct ib_device *ibdev, err_sw2hw_cq: MLX_FILL_1 ( ci_db_rec, 1, res, ARBEL_UAR_RES_NONE ); MLX_FILL_1 ( arm_db_rec, 1, res, ARBEL_UAR_RES_NONE ); - free_dma ( arbel_cq->cqe, arbel_cq->cqe_size ); + free_phys ( arbel_cq->cqe, arbel_cq->cqe_size ); err_cqe: free ( arbel_cq ); err_arbel_cq: @@ -737,7 +737,7 @@ static void arbel_destroy_cq ( struct ib_device *ibdev, MLX_FILL_1 ( arm_db_rec, 1, res, ARBEL_UAR_RES_NONE ); /* Free memory */ - free_dma ( arbel_cq->cqe, arbel_cq->cqe_size ); + free_phys ( arbel_cq->cqe, arbel_cq->cqe_size ); free ( arbel_cq ); /* Mark queue number as free */ @@ -873,8 +873,8 @@ static int arbel_create_send_wq ( struct arbel_send_work_queue *arbel_send_wq, /* Allocate work queue */ arbel_send_wq->wqe_size = ( num_wqes * sizeof ( arbel_send_wq->wqe[0] ) ); - arbel_send_wq->wqe = malloc_dma ( arbel_send_wq->wqe_size, - sizeof ( arbel_send_wq->wqe[0] ) ); + arbel_send_wq->wqe = malloc_phys ( arbel_send_wq->wqe_size, + sizeof ( arbel_send_wq->wqe[0] ) ); if ( ! arbel_send_wq->wqe ) return -ENOMEM; memset ( arbel_send_wq->wqe, 0, arbel_send_wq->wqe_size ); @@ -914,8 +914,8 @@ static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq, /* Allocate work queue */ arbel_recv_wq->wqe_size = ( num_wqes * sizeof ( arbel_recv_wq->wqe[0] ) ); - arbel_recv_wq->wqe = malloc_dma ( arbel_recv_wq->wqe_size, - sizeof ( arbel_recv_wq->wqe[0] ) ); + arbel_recv_wq->wqe = malloc_phys ( arbel_recv_wq->wqe_size, + sizeof ( arbel_recv_wq->wqe[0] ) ); if ( ! arbel_recv_wq->wqe ) { rc = -ENOMEM; goto err_alloc_wqe; @@ -927,8 +927,8 @@ static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq, ( type == IB_QPT_UD ) ) { arbel_recv_wq->grh_size = ( num_wqes * sizeof ( arbel_recv_wq->grh[0] ) ); - arbel_recv_wq->grh = malloc_dma ( arbel_recv_wq->grh_size, - sizeof ( void * ) ); + arbel_recv_wq->grh = malloc_phys ( arbel_recv_wq->grh_size, + sizeof ( void * ) ); if ( ! 
arbel_recv_wq->grh ) { rc = -ENOMEM; goto err_alloc_grh; @@ -954,9 +954,9 @@ static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq, return 0; - free_dma ( arbel_recv_wq->grh, arbel_recv_wq->grh_size ); + free_phys ( arbel_recv_wq->grh, arbel_recv_wq->grh_size ); err_alloc_grh: - free_dma ( arbel_recv_wq->wqe, arbel_recv_wq->wqe_size ); + free_phys ( arbel_recv_wq->wqe, arbel_recv_wq->wqe_size ); err_alloc_wqe: return rc; } @@ -1102,10 +1102,10 @@ static int arbel_create_qp ( struct ib_device *ibdev, MLX_FILL_1 ( send_db_rec, 1, res, ARBEL_UAR_RES_NONE ); MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE ); err_unsupported_address_split: - free_dma ( arbel_qp->recv.grh, arbel_qp->recv.grh_size ); - free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size ); + free_phys ( arbel_qp->recv.grh, arbel_qp->recv.grh_size ); + free_phys ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size ); err_create_recv_wq: - free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size ); + free_phys ( arbel_qp->send.wqe, arbel_qp->send.wqe_size ); err_create_send_wq: free ( arbel_qp ); err_arbel_qp: @@ -1231,9 +1231,9 @@ static void arbel_destroy_qp ( struct ib_device *ibdev, MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE ); /* Free memory */ - free_dma ( arbel_qp->recv.grh, arbel_qp->recv.grh_size ); - free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size ); - free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size ); + free_phys ( arbel_qp->recv.grh, arbel_qp->recv.grh_size ); + free_phys ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size ); + free_phys ( arbel_qp->send.wqe, arbel_qp->send.wqe_size ); free ( arbel_qp ); /* Mark queue number as free */ @@ -1758,8 +1758,8 @@ static int arbel_create_eq ( struct arbel *arbel ) { /* Allocate event queue itself */ arbel_eq->eqe_size = ( ARBEL_NUM_EQES * sizeof ( arbel_eq->eqe[0] ) ); - arbel_eq->eqe = malloc_dma ( arbel_eq->eqe_size, - sizeof ( arbel_eq->eqe[0] ) ); + arbel_eq->eqe = malloc_phys ( arbel_eq->eqe_size, + sizeof ( arbel_eq->eqe[0] ) ); if ( ! arbel_eq->eqe ) { rc = -ENOMEM; goto err_eqe; @@ -1806,7 +1806,7 @@ static int arbel_create_eq ( struct arbel *arbel ) { err_map_eq: arbel_cmd_hw2sw_eq ( arbel, arbel_eq->eqn, &eqctx ); err_sw2hw_eq: - free_dma ( arbel_eq->eqe, arbel_eq->eqe_size ); + free_phys ( arbel_eq->eqe, arbel_eq->eqe_size ); err_eqe: memset ( arbel_eq, 0, sizeof ( *arbel_eq ) ); return rc; @@ -1844,7 +1844,7 @@ static void arbel_destroy_eq ( struct arbel *arbel ) { } /* Free memory */ - free_dma ( arbel_eq->eqe, arbel_eq->eqe_size ); + free_phys ( arbel_eq->eqe, arbel_eq->eqe_size ); memset ( arbel_eq, 0, sizeof ( *arbel_eq ) ); } @@ -2059,7 +2059,8 @@ static int arbel_start_firmware ( struct arbel *arbel ) { eq_set_ci_base_addr = ( ( (uint64_t) MLX_GET ( &fw, eq_set_ci_base_addr_h ) << 32 ) | ( (uint64_t) MLX_GET ( &fw, eq_set_ci_base_addr_l ) ) ); - arbel->eq_ci_doorbells = ioremap ( eq_set_ci_base_addr, 0x200 ); + arbel->eq_ci_doorbells = pci_ioremap ( arbel->pci, eq_set_ci_base_addr, + 0x200 ); /* Enable locally-attached memory. Ignore failure; there may * be no attached memory. @@ -2454,7 +2455,7 @@ static int arbel_alloc_icm ( struct arbel *arbel, icm_phys = user_to_phys ( arbel->icm, 0 ); /* Allocate doorbell UAR */ - arbel->db_rec = malloc_dma ( ARBEL_PAGE_SIZE, ARBEL_PAGE_SIZE ); + arbel->db_rec = malloc_phys ( ARBEL_PAGE_SIZE, ARBEL_PAGE_SIZE ); if ( ! 
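
(Illustrative aside, not part of the patch: malloc_phys()/free_phys() are straight renames of malloc_dma()/free_dma(), keeping the (size, alignment) allocation and (pointer, size) free signatures used throughout these hunks, so each conversion is purely mechanical. A minimal sketch with placeholder size and alignment values:)

/* Sketch of a converted allocate/free pair; BUF_SIZE and BUF_ALIGN are
 * placeholders, not constants from the driver.
 */
void *buf;

buf = malloc_phys ( BUF_SIZE, BUF_ALIGN );      /* was malloc_dma() */
if ( ! buf )
        return -ENOMEM;
memset ( buf, 0, BUF_SIZE );
/* ... use buffer for hardware-visible structures ... */
free_phys ( buf, BUF_SIZE );                    /* was free_dma() */
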
arbel->db_rec ) { rc = -ENOMEM; goto err_alloc_doorbell; @@ -2512,7 +2513,7 @@ static int arbel_alloc_icm ( struct arbel *arbel, err_map_icm: arbel_cmd_unmap_icm_aux ( arbel ); err_map_icm_aux: - free_dma ( arbel->db_rec, ARBEL_PAGE_SIZE ); + free_phys ( arbel->db_rec, ARBEL_PAGE_SIZE ); arbel->db_rec= NULL; err_alloc_doorbell: err_alloc_icm: @@ -2535,7 +2536,7 @@ static void arbel_free_icm ( struct arbel *arbel ) { arbel_cmd_unmap_icm ( arbel, ( arbel->icm_len / ARBEL_PAGE_SIZE ), &unmap_icm ); arbel_cmd_unmap_icm_aux ( arbel ); - free_dma ( arbel->db_rec, ARBEL_PAGE_SIZE ); + free_phys ( arbel->db_rec, ARBEL_PAGE_SIZE ); arbel->db_rec = NULL; } @@ -2983,18 +2984,18 @@ static struct arbel * arbel_alloc ( void ) { goto err_arbel; /* Allocate space for mailboxes */ - arbel->mailbox_in = malloc_dma ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN ); + arbel->mailbox_in = malloc_phys ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN ); if ( ! arbel->mailbox_in ) goto err_mailbox_in; - arbel->mailbox_out = malloc_dma ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN ); + arbel->mailbox_out = malloc_phys ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN ); if ( ! arbel->mailbox_out ) goto err_mailbox_out; return arbel; - free_dma ( arbel->mailbox_out, ARBEL_MBOX_SIZE ); + free_phys ( arbel->mailbox_out, ARBEL_MBOX_SIZE ); err_mailbox_out: - free_dma ( arbel->mailbox_in, ARBEL_MBOX_SIZE ); + free_phys ( arbel->mailbox_in, ARBEL_MBOX_SIZE ); err_mailbox_in: free ( arbel ); err_arbel: @@ -3010,8 +3011,8 @@ static void arbel_free ( struct arbel *arbel ) { ufree ( arbel->icm ); ufree ( arbel->firmware_area ); - free_dma ( arbel->mailbox_out, ARBEL_MBOX_SIZE ); - free_dma ( arbel->mailbox_in, ARBEL_MBOX_SIZE ); + free_phys ( arbel->mailbox_out, ARBEL_MBOX_SIZE ); + free_phys ( arbel->mailbox_in, ARBEL_MBOX_SIZE ); free ( arbel ); } @@ -3025,6 +3026,8 @@ static void arbel_free ( struct arbel *arbel ) { static int arbel_probe ( struct pci_device *pci ) { struct arbel *arbel; struct ib_device *ibdev; + unsigned long config; + unsigned long uar; int i; int rc; @@ -3041,11 +3044,11 @@ static int arbel_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map PCI BARs */ - arbel->config = ioremap ( pci_bar_start ( pci, ARBEL_PCI_CONFIG_BAR ), - ARBEL_PCI_CONFIG_BAR_SIZE ); - arbel->uar = ioremap ( ( pci_bar_start ( pci, ARBEL_PCI_UAR_BAR ) + - ARBEL_PCI_UAR_IDX * ARBEL_PCI_UAR_SIZE ), - ARBEL_PCI_UAR_SIZE ); + config = pci_bar_start ( pci, ARBEL_PCI_CONFIG_BAR ); + arbel->config = pci_ioremap ( pci, config, ARBEL_PCI_CONFIG_BAR_SIZE ); + uar = ( pci_bar_start ( pci, ARBEL_PCI_UAR_BAR ) + + ARBEL_PCI_UAR_IDX * ARBEL_PCI_UAR_SIZE ); + arbel->uar = pci_ioremap ( pci, uar, ARBEL_PCI_UAR_SIZE ); /* Allocate Infiniband devices */ for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ ) { @@ -3058,6 +3061,7 @@ static int arbel_probe ( struct pci_device *pci ) { ibdev->op = &arbel_ib_operations; ibdev->dev = &pci->dev; ibdev->port = ( ARBEL_PORT_BASE + i ); + ibdev->ports = ARBEL_NUM_PORTS; ib_set_drvdata ( ibdev, arbel ); } diff --git a/src/drivers/infiniband/flexboot_nodnic.c b/src/drivers/infiniband/flexboot_nodnic.c index 93bb05446..c6e19b955 100644 --- a/src/drivers/infiniband/flexboot_nodnic.c +++ b/src/drivers/infiniband/flexboot_nodnic.c @@ -365,7 +365,8 @@ static int flexboot_nodnic_create_qp ( struct ib_device *ibdev, goto qp_alloc_err; } - status = nodnic_port_create_qp(&port->port_priv, qp->type, + status = nodnic_port_create_qp(&port->port_priv, + (nodnic_queue_pair_type) qp->type, qp->send.num_wqes * sizeof(struct nodnic_send_wqbb), qp->send.num_wqes, 
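
(Illustrative aside, not part of the patch: each probe routine in this diff now records the total port count in the new ibdev->ports field alongside the existing ibdev->port index; ARBEL_NUM_PORTS, device_priv->device_cap.num_ports and golan->caps.num_ports are the per-driver sources. A minimal sketch of that per-port loop, with PORT_BASE and NUM_PORTS standing in for the driver-specific values:)

/* Sketch of the per-port allocation loop; PORT_BASE and NUM_PORTS are
 * placeholders for the driver-specific constants shown in the hunks.
 */
for ( i = 0 ; i < NUM_PORTS ; i++ ) {
        ibdev = alloc_ibdev ( 0 );
        if ( ! ibdev )
                goto err_alloc_ibdev;
        ibdev->dev = &pci->dev;
        ibdev->port = ( PORT_BASE + i );        /* 1-based port number */
        ibdev->ports = NUM_PORTS;               /* new: total number of ports */
        ib_set_drvdata ( ibdev, drvdata );
}
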
qp->recv.num_wqes * sizeof(struct nodnic_recv_wqe), @@ -406,7 +407,8 @@ static void flexboot_nodnic_destroy_qp ( struct ib_device *ibdev, struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1]; struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp = ib_qp_get_drvdata ( qp ); - nodnic_port_destroy_qp(&port->port_priv, qp->type, + nodnic_port_destroy_qp(&port->port_priv, + (nodnic_queue_pair_type) qp->type, flexboot_nodnic_qp->nodnic_queue_pair); free(flexboot_nodnic_qp); @@ -1163,6 +1165,7 @@ flexboot_nodnic_allocate_infiniband_devices( struct flexboot_nodnic *flexboot_no ibdev->op = &flexboot_nodnic_ib_operations; ibdev->dev = &pci->dev; ibdev->port = ( FLEXBOOT_NODNIC_PORT_BASE + i); + ibdev->ports = device_priv->device_cap.num_ports; ib_set_drvdata(ibdev, flexboot_nodnic_priv); } return status; @@ -1459,7 +1462,7 @@ static int flexboot_nodnic_alloc_uar ( struct flexboot_nodnic *flexboot_nodnic ) return -EINVAL; } uar->phys = ( pci_bar_start ( pci, FLEXBOOT_NODNIC_HCA_BAR ) + (mlx_uint32)uar->offset ); - uar->virt = ( void * )( ioremap ( uar->phys, FLEXBOOT_NODNIC_PAGE_SIZE ) ); + uar->virt = ( void * )( pci_ioremap ( pci, uar->phys, FLEXBOOT_NODNIC_PAGE_SIZE ) ); return status; } diff --git a/src/drivers/infiniband/golan.c b/src/drivers/infiniband/golan.c index e96ba2698..2f1ab2357 100755 --- a/src/drivers/infiniband/golan.c +++ b/src/drivers/infiniband/golan.c @@ -585,9 +585,9 @@ static inline int golan_set_access_reg ( struct golan *golan __attribute__ (( un static inline void golan_cmd_uninit ( struct golan *golan ) { - free_dma(golan->mboxes.outbox, GOLAN_PAGE_SIZE); - free_dma(golan->mboxes.inbox, GOLAN_PAGE_SIZE); - free_dma(golan->cmd.addr, GOLAN_PAGE_SIZE); + free_phys(golan->mboxes.outbox, GOLAN_PAGE_SIZE); + free_phys(golan->mboxes.inbox, GOLAN_PAGE_SIZE); + free_phys(golan->cmd.addr, GOLAN_PAGE_SIZE); } /** @@ -602,17 +602,17 @@ static inline int golan_cmd_init ( struct golan *golan ) int rc = 0; uint32_t addr_l_sz; - if (!(golan->cmd.addr = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) { + if (!(golan->cmd.addr = malloc_phys(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) { rc = -ENOMEM; - goto malloc_dma_failed; + goto malloc_phys_failed; } - if (!(golan->mboxes.inbox = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) { + if (!(golan->mboxes.inbox = malloc_phys(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) { rc = -ENOMEM; - goto malloc_dma_inbox_failed; + goto malloc_phys_inbox_failed; } - if (!(golan->mboxes.outbox = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) { + if (!(golan->mboxes.outbox = malloc_phys(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) { rc = -ENOMEM; - goto malloc_dma_outbox_failed; + goto malloc_phys_outbox_failed; } addr_l_sz = be32_to_cpu(readl(&golan->iseg->cmdq_addr_l_sz)); @@ -629,11 +629,11 @@ static inline int golan_cmd_init ( struct golan *golan ) DBGC( golan , "%s Command interface was initialized\n", __FUNCTION__); return 0; -malloc_dma_outbox_failed: - free_dma(golan->mboxes.inbox, GOLAN_PAGE_SIZE); -malloc_dma_inbox_failed: - free_dma(golan->cmd.addr, GOLAN_PAGE_SIZE); -malloc_dma_failed: +malloc_phys_outbox_failed: + free_phys(golan->mboxes.inbox, GOLAN_PAGE_SIZE); +malloc_phys_inbox_failed: + free_phys(golan->cmd.addr, GOLAN_PAGE_SIZE); +malloc_phys_failed: DBGC (golan ,"%s Failed to initialize command interface (rc = 0x%x)\n", __FUNCTION__, rc); return rc; @@ -693,7 +693,7 @@ static inline int golan_alloc_uar(struct golan *golan) uar->index = be32_to_cpu(out->uarn) & 0xffffff; uar->phys = (pci_bar_start(golan->pci, GOLAN_HCA_BAR) + (uar->index << 
GOLAN_PAGE_SHIFT)); - uar->virt = (void *)(ioremap(uar->phys, GOLAN_PAGE_SIZE)); + uar->virt = (void *)(pci_ioremap(golan->pci, uar->phys, GOLAN_PAGE_SIZE)); DBGC( golan , "%s: UAR allocated with index 0x%x\n", __FUNCTION__, uar->index); return 0; @@ -743,7 +743,7 @@ static int golan_create_eq(struct golan *golan) eq->cons_index = 0; eq->size = GOLAN_NUM_EQES * sizeof(eq->eqes[0]); - eq->eqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE ); + eq->eqes = malloc_phys ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE ); if (!eq->eqes) { rc = -ENOMEM; goto err_create_eq_eqe_alloc; @@ -781,7 +781,7 @@ static int golan_create_eq(struct golan *golan) return 0; err_create_eq_cmd: - free_dma ( eq->eqes , GOLAN_PAGE_SIZE ); + free_phys ( eq->eqes , GOLAN_PAGE_SIZE ); err_create_eq_eqe_alloc: DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc); return rc; @@ -806,7 +806,7 @@ static void golan_destory_eq(struct golan *golan) rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__); GOLAN_PRINT_RC_AND_CMD_STATUS; - free_dma ( golan->eq.eqes , GOLAN_PAGE_SIZE ); + free_phys ( golan->eq.eqes , GOLAN_PAGE_SIZE ); golan->eq.eqn = 0; DBGC( golan, "%s Event queue (0x%x) was destroyed\n", __FUNCTION__, eqn); @@ -922,8 +922,8 @@ static inline void golan_pci_init(struct golan *golan) adjust_pci_device ( pci ); /* Get HCA BAR */ - golan->iseg = ioremap ( pci_bar_start ( pci, GOLAN_HCA_BAR), - GOLAN_PCI_CONFIG_BAR_SIZE ); + golan->iseg = pci_ioremap ( pci, pci_bar_start ( pci, GOLAN_HCA_BAR), + GOLAN_PCI_CONFIG_BAR_SIZE ); } static inline struct golan *golan_alloc() @@ -962,14 +962,14 @@ static int golan_create_cq(struct ib_device *ibdev, goto err_create_cq; } golan_cq->size = sizeof(golan_cq->cqes[0]) * cq->num_cqes; - golan_cq->doorbell_record = malloc_dma(GOLAN_CQ_DB_RECORD_SIZE, + golan_cq->doorbell_record = malloc_phys(GOLAN_CQ_DB_RECORD_SIZE, GOLAN_CQ_DB_RECORD_SIZE); if (!golan_cq->doorbell_record) { rc = -ENOMEM; goto err_create_cq_db_alloc; } - golan_cq->cqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE ); + golan_cq->cqes = malloc_phys ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE ); if (!golan_cq->cqes) { rc = -ENOMEM; goto err_create_cq_cqe_alloc; @@ -1008,9 +1008,9 @@ static int golan_create_cq(struct ib_device *ibdev, return 0; err_create_cq_cmd: - free_dma( golan_cq->cqes , GOLAN_PAGE_SIZE ); + free_phys( golan_cq->cqes , GOLAN_PAGE_SIZE ); err_create_cq_cqe_alloc: - free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE); + free_phys(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE); err_create_cq_db_alloc: free ( golan_cq ); err_create_cq: @@ -1045,8 +1045,8 @@ static void golan_destroy_cq(struct ib_device *ibdev, cq->cqn = 0; ib_cq_set_drvdata(cq, NULL); - free_dma ( golan_cq->cqes , GOLAN_PAGE_SIZE ); - free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE); + free_phys ( golan_cq->cqes , GOLAN_PAGE_SIZE ); + free_phys(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE); free(golan_cq); DBGC (golan, "%s CQ number 0x%x was destroyed\n", __FUNCTION__, cqn); @@ -1138,7 +1138,7 @@ static int golan_create_qp_aux(struct ib_device *ibdev, golan_qp->size = golan_qp->sq.size + golan_qp->rq.size; /* allocate dma memory for WQEs (1 page is enough) - should change it */ - golan_qp->wqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE ); + golan_qp->wqes = malloc_phys ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE ); if (!golan_qp->wqes) { rc = -ENOMEM; goto err_create_qp_wqe_alloc; @@ -1160,7 +1160,7 @@ static int golan_create_qp_aux(struct ib_device *ibdev, data++; } - golan_qp->doorbell_record = 
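
(Illustrative aside, not part of the patch: pci_ioremap() replaces the bare ioremap() calls in these hunks and additionally takes the owning PCI device, as the arbel and golan changes show. A minimal sketch of mapping a BAR, with EXAMPLE_BAR and EXAMPLE_BAR_SIZE as placeholders:)

/* Sketch of a BAR mapping using the new helper; EXAMPLE_BAR and
 * EXAMPLE_BAR_SIZE are placeholders, not driver constants.
 */
unsigned long bar_start;
void *regs;

bar_start = pci_bar_start ( pci, EXAMPLE_BAR );
regs = pci_ioremap ( pci, bar_start, EXAMPLE_BAR_SIZE );
if ( ! regs )
        return -ENODEV;
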
malloc_dma(sizeof(struct golan_qp_db), + golan_qp->doorbell_record = malloc_phys(sizeof(struct golan_qp_db), sizeof(struct golan_qp_db)); if (!golan_qp->doorbell_record) { rc = -ENOMEM; @@ -1213,9 +1213,9 @@ static int golan_create_qp_aux(struct ib_device *ibdev, return 0; err_create_qp_cmd: - free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db)); + free_phys(golan_qp->doorbell_record, sizeof(struct golan_qp_db)); err_create_qp_db_alloc: - free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE ); + free_phys ( golan_qp->wqes, GOLAN_PAGE_SIZE ); err_create_qp_wqe_alloc: err_create_qp_sq_size: err_create_qp_sq_wqe_size: @@ -1422,8 +1422,8 @@ static void golan_destroy_qp(struct ib_device *ibdev, qp->qpn = 0; ib_qp_set_drvdata(qp, NULL); - free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db)); - free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE ); + free_phys(golan_qp->doorbell_record, sizeof(struct golan_qp_db)); + free_phys ( golan_qp->wqes, GOLAN_PAGE_SIZE ); free(golan_qp); DBGC( golan ,"%s QP 0x%lx was destroyed\n", __FUNCTION__, qpn); @@ -2386,6 +2386,7 @@ static int golan_probe_normal ( struct pci_device *pci ) { ibdev->op = &golan_ib_operations; ibdev->dev = &pci->dev; ibdev->port = (GOLAN_PORT_BASE + i); + ibdev->ports = golan->caps.num_ports; ib_set_drvdata( ibdev, golan ); } @@ -2642,7 +2643,10 @@ static struct pci_device_id golan_nics[] = { PCI_ROM ( 0x15b3, 0x1019, "ConnectX-5EX", "ConnectX-5EX HCA driver, DevID 4121", 0 ), PCI_ROM ( 0x15b3, 0x101b, "ConnectX-6", "ConnectX-6 HCA driver, DevID 4123", 0 ), PCI_ROM ( 0x15b3, 0x101d, "ConnectX-6DX", "ConnectX-6DX HCA driver, DevID 4125", 0 ), + PCI_ROM ( 0x15b3, 0x101f, "ConnectX-6Lx", "ConnectX-6LX HCA driver, DevID 4127", 0 ), + PCI_ROM ( 0x15b3, 0x1021, "ConnectX-7", "ConnectX-7 HCA driver, DevID 4129", 0 ), PCI_ROM ( 0x15b3, 0xa2d2, "BlueField", "BlueField integrated ConnectX-5 network controller HCA driver, DevID 41682", 0 ), + PCI_ROM ( 0x15b3, 0xa2d6, "BlueField-2", "BlueField-2 network controller HCA driver, DevID 41686", 0 ), }; struct pci_driver golan_driver __pci_driver = { diff --git a/src/drivers/infiniband/hermon.c b/src/drivers/infiniband/hermon.c index 9675c156b..2afaaf991 100644 --- a/src/drivers/infiniband/hermon.c +++ b/src/drivers/infiniband/hermon.c @@ -137,13 +137,13 @@ static int hermon_cmd_wait ( struct hermon *hermon, struct hermonprm_hca_command_register *hcr ) { unsigned int wait; - for ( wait = HERMON_HCR_MAX_WAIT_MS ; wait ; wait-- ) { + for ( wait = ( 100 * HERMON_HCR_MAX_WAIT_MS ) ; wait ; wait-- ) { hcr->u.dwords[6] = readl ( hermon->config + HERMON_HCR_REG ( 6 ) ); if ( ( MLX_GET ( hcr, go ) == 0 ) && ( MLX_GET ( hcr, t ) == hermon->toggle ) ) return 0; - mdelay ( 1 ); + udelay ( 10 ); } return -EBUSY; } @@ -175,7 +175,7 @@ static int hermon_cmd ( struct hermon *hermon, unsigned long command, assert ( in_len <= HERMON_MBOX_SIZE ); assert ( out_len <= HERMON_MBOX_SIZE ); - DBGC2 ( hermon, "Hermon %p command %02x in %zx%s out %zx%s\n", + DBGC2 ( hermon, "Hermon %p command %04x in %zx%s out %zx%s\n", hermon, opcode, in_len, ( ( command & HERMON_HCR_IN_MBOX ) ? "(mbox)" : "" ), out_len, ( ( command & HERMON_HCR_OUT_MBOX ) ? 
"(mbox)" : "" ) ); @@ -214,8 +214,6 @@ static int hermon_cmd ( struct hermon *hermon, unsigned long command, opcode_modifier, op_mod, go, 1, t, hermon->toggle ); - DBGC ( hermon, "Hermon %p issuing command %04x\n", - hermon, opcode ); DBGC2_HDA ( hermon, virt_to_phys ( hermon->config + HERMON_HCR_BASE ), &hcr, sizeof ( hcr ) ); if ( in_len && ( command & HERMON_HCR_IN_MBOX ) ) { @@ -234,8 +232,8 @@ static int hermon_cmd ( struct hermon *hermon, unsigned long command, /* Wait for command completion */ if ( ( rc = hermon_cmd_wait ( hermon, &hcr ) ) != 0 ) { - DBGC ( hermon, "Hermon %p timed out waiting for command:\n", - hermon ); + DBGC ( hermon, "Hermon %p timed out waiting for command " + "%04x:\n", hermon, opcode ); DBGC_HDA ( hermon, virt_to_phys ( hermon->config + HERMON_HCR_BASE ), &hcr, sizeof ( hcr ) ); @@ -245,8 +243,8 @@ static int hermon_cmd ( struct hermon *hermon, unsigned long command, /* Check command status */ status = MLX_GET ( &hcr, status ); if ( status != 0 ) { - DBGC ( hermon, "Hermon %p command failed with status %02x:\n", - hermon, status ); + DBGC ( hermon, "Hermon %p command %04x failed with status " + "%02x:\n", hermon, opcode, status ); DBGC_HDA ( hermon, virt_to_phys ( hermon->config + HERMON_HCR_BASE ), &hcr, sizeof ( hcr ) ); @@ -333,6 +331,13 @@ hermon_cmd_sw2hw_mpt ( struct hermon *hermon, unsigned int index, 0, mpt, index, NULL ); } +static inline int +hermon_cmd_hw2sw_mpt ( struct hermon *hermon, unsigned int index ) { + return hermon_cmd ( hermon, + HERMON_HCR_VOID_CMD ( HERMON_HCR_HW2SW_MPT ), + 0, NULL, index, NULL ); +} + static inline int hermon_cmd_write_mtt ( struct hermon *hermon, const struct hermonprm_write_mtt *write_mtt ) { @@ -638,9 +643,9 @@ static int hermon_alloc_mtt ( struct hermon *hermon, mtt_offset = hermon_bitmask_alloc ( hermon->mtt_inuse, HERMON_MAX_MTTS, num_pages ); if ( mtt_offset < 0 ) { - DBGC ( hermon, "Hermon %p could not allocate %d MTT entries\n", - hermon, num_pages ); rc = mtt_offset; + DBGC ( hermon, "Hermon %p could not allocate %d MTT entries: " + "%s\n", hermon, num_pages, strerror ( rc ) ); goto err_mtt_offset; } mtt_base_addr = ( ( hermon->cap.reserved_mtts + mtt_offset ) * @@ -664,8 +669,9 @@ static int hermon_alloc_mtt ( struct hermon *hermon, ptag_l, ( addr >> 3 ) ); if ( ( rc = hermon_cmd_write_mtt ( hermon, &write_mtt ) ) != 0 ) { - DBGC ( hermon, "Hermon %p could not write MTT at %x\n", - hermon, mtt_base_addr ); + DBGC ( hermon, "Hermon %p could not write MTT at %x: " + "%s\n", hermon, mtt_base_addr, + strerror ( rc ) ); goto err_write_mtt; } addr += HERMON_PAGE_SIZE; @@ -817,6 +823,11 @@ hermon_dump_cqctx ( struct hermon *hermon, struct ib_completion_queue *cq ) { struct hermonprm_completion_queue_context cqctx; int rc; + /* Do nothing unless debugging is enabled */ + if ( ! DBG_LOG ) + return 0; + + /* Dump completion queue context */ memset ( &cqctx, 0, sizeof ( cqctx ) ); if ( ( rc = hermon_cmd_query_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) { DBGC ( hermon, "Hermon %p CQN %#lx QUERY_CQ failed: %s\n", @@ -859,14 +870,18 @@ static int hermon_create_cq ( struct ib_device *ibdev, /* Allocate control structures */ hermon_cq = zalloc ( sizeof ( *hermon_cq ) ); if ( ! 
hermon_cq ) { + DBGC ( hermon, "Hermon %p CQN %#lx could not allocate CQ\n", + hermon, cq->cqn ); rc = -ENOMEM; goto err_hermon_cq; } /* Allocate doorbell */ - hermon_cq->doorbell = malloc_dma ( sizeof ( hermon_cq->doorbell[0] ), - sizeof ( hermon_cq->doorbell[0] ) ); + hermon_cq->doorbell = malloc_phys ( sizeof ( hermon_cq->doorbell[0] ), + sizeof ( hermon_cq->doorbell[0] ) ); if ( ! hermon_cq->doorbell ) { + DBGC ( hermon, "Hermon %p CQN %#lx could not allocate " + "doorbell\n", hermon, cq->cqn ); rc = -ENOMEM; goto err_doorbell; } @@ -874,9 +889,11 @@ static int hermon_create_cq ( struct ib_device *ibdev, /* Allocate completion queue itself */ hermon_cq->cqe_size = ( cq->num_cqes * sizeof ( hermon_cq->cqe[0] ) ); - hermon_cq->cqe = malloc_dma ( hermon_cq->cqe_size, - sizeof ( hermon_cq->cqe[0] ) ); + hermon_cq->cqe = malloc_phys ( hermon_cq->cqe_size, + sizeof ( hermon_cq->cqe[0] ) ); if ( ! hermon_cq->cqe ) { + DBGC ( hermon, "Hermon %p CQN %#lx could not allocate CQEs\n", + hermon, cq->cqn ); rc = -ENOMEM; goto err_cqe; } @@ -889,8 +906,11 @@ static int hermon_create_cq ( struct ib_device *ibdev, /* Allocate MTT entries */ if ( ( rc = hermon_alloc_mtt ( hermon, hermon_cq->cqe, hermon_cq->cqe_size, - &hermon_cq->mtt ) ) != 0 ) + &hermon_cq->mtt ) ) != 0 ) { + DBGC ( hermon, "Hermon %p CQN %#lx could not allocate MTTs: " + "%s\n", hermon, cq->cqn, strerror ( rc ) ); goto err_alloc_mtt; + } /* Hand queue over to hardware */ memset ( &cqctx, 0, sizeof ( cqctx ) ); @@ -925,9 +945,9 @@ static int hermon_create_cq ( struct ib_device *ibdev, err_sw2hw_cq: hermon_free_mtt ( hermon, &hermon_cq->mtt ); err_alloc_mtt: - free_dma ( hermon_cq->cqe, hermon_cq->cqe_size ); + free_phys ( hermon_cq->cqe, hermon_cq->cqe_size ); err_cqe: - free_dma ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) ); + free_phys ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) ); err_doorbell: free ( hermon_cq ); err_hermon_cq: @@ -962,8 +982,8 @@ static void hermon_destroy_cq ( struct ib_device *ibdev, hermon_free_mtt ( hermon, &hermon_cq->mtt ); /* Free memory */ - free_dma ( hermon_cq->cqe, hermon_cq->cqe_size ); - free_dma ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) ); + free_phys ( hermon_cq->cqe, hermon_cq->cqe_size ); + free_phys ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) ); free ( hermon_cq ); /* Mark queue number as free */ @@ -1084,16 +1104,30 @@ static uint8_t hermon_qp_st[] = { */ static __attribute__ (( unused )) int hermon_dump_qpctx ( struct hermon *hermon, struct ib_queue_pair *qp ) { + struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp ); struct hermonprm_qp_ee_state_transitions qpctx; + unsigned int state; int rc; + /* Do nothing unless debugging is enabled */ + if ( ! 
DBG_LOG ) + return 0; + + /* Dump queue pair context */ memset ( &qpctx, 0, sizeof ( qpctx ) ); if ( ( rc = hermon_cmd_query_qp ( hermon, qp->qpn, &qpctx ) ) != 0 ) { DBGC ( hermon, "Hermon %p QPN %#lx QUERY_QP failed: %s\n", hermon, qp->qpn, strerror ( rc ) ); return rc; } - DBGC ( hermon, "Hermon %p QPN %#lx context:\n", hermon, qp->qpn ); + state = MLX_GET ( &qpctx, qpc_eec_data.state ); + if ( state != hermon_qp->state ) { + DBGC ( hermon, "Hermon %p QPN %#lx state %d unexpected " + "(should be %d)\n", + hermon, qp->qpn, state, hermon_qp->state ); + } + DBGC ( hermon, "Hermon %p QPN %#lx state %d context:\n", + hermon, qp->qpn, state ); DBGC_HDA ( hermon, 0, &qpctx.u.dwords[2], ( sizeof ( qpctx ) - 8 ) ); return 0; @@ -1122,15 +1156,19 @@ static int hermon_create_qp ( struct ib_device *ibdev, /* Allocate control structures */ hermon_qp = zalloc ( sizeof ( *hermon_qp ) ); if ( ! hermon_qp ) { + DBGC ( hermon, "Hermon %p QPN %#lx could not allocate QP\n", + hermon, qp->qpn ); rc = -ENOMEM; goto err_hermon_qp; } /* Allocate doorbells */ hermon_qp->recv.doorbell = - malloc_dma ( sizeof ( hermon_qp->recv.doorbell[0] ), - sizeof ( hermon_qp->recv.doorbell[0] ) ); + malloc_phys ( sizeof ( hermon_qp->recv.doorbell[0] ), + sizeof ( hermon_qp->recv.doorbell[0] ) ); if ( ! hermon_qp->recv.doorbell ) { + DBGC ( hermon, "Hermon %p QPN %#lx could not allocate " + "doorbell\n", hermon, qp->qpn ); rc = -ENOMEM; goto err_recv_doorbell; } @@ -1157,9 +1195,11 @@ static int hermon_create_qp ( struct ib_device *ibdev, hermon_qp->wqe_size = ( hermon_qp->send.wqe_size + hermon_qp->recv.wqe_size + hermon_qp->recv.grh_size ); - hermon_qp->wqe = malloc_dma ( hermon_qp->wqe_size, - sizeof ( hermon_qp->send.wqe[0] ) ); + hermon_qp->wqe = malloc_phys ( hermon_qp->wqe_size, + sizeof ( hermon_qp->send.wqe[0] ) ); if ( ! 
hermon_qp->wqe ) { + DBGC ( hermon, "Hermon %p QPN %#lx could not allocate WQEs\n", + hermon, qp->qpn ); rc = -ENOMEM; goto err_alloc_wqe; } @@ -1184,6 +1224,8 @@ static int hermon_create_qp ( struct ib_device *ibdev, if ( ( rc = hermon_alloc_mtt ( hermon, hermon_qp->wqe, hermon_qp->wqe_size, &hermon_qp->mtt ) ) != 0 ) { + DBGC ( hermon, "Hermon %p QPN %#lx could not allocate MTTs: " + "%s\n", hermon, qp->qpn, strerror ( rc ) ); goto err_alloc_mtt; } @@ -1248,10 +1290,10 @@ static int hermon_create_qp ( struct ib_device *ibdev, err_rst2init_qp: hermon_free_mtt ( hermon, &hermon_qp->mtt ); err_alloc_mtt: - free_dma ( hermon_qp->wqe, hermon_qp->wqe_size ); + free_phys ( hermon_qp->wqe, hermon_qp->wqe_size ); err_alloc_wqe: - free_dma ( hermon_qp->recv.doorbell, - sizeof ( hermon_qp->recv.doorbell[0] ) ); + free_phys ( hermon_qp->recv.doorbell, + sizeof ( hermon_qp->recv.doorbell[0] ) ); err_recv_doorbell: free ( hermon_qp ); err_hermon_qp: @@ -1363,9 +1405,9 @@ static void hermon_destroy_qp ( struct ib_device *ibdev, hermon_free_mtt ( hermon, &hermon_qp->mtt ); /* Free memory */ - free_dma ( hermon_qp->wqe, hermon_qp->wqe_size ); - free_dma ( hermon_qp->recv.doorbell, - sizeof ( hermon_qp->recv.doorbell[0] ) ); + free_phys ( hermon_qp->wqe, hermon_qp->wqe_size ); + free_phys ( hermon_qp->recv.doorbell, + sizeof ( hermon_qp->recv.doorbell[0] ) ); free ( hermon_qp ); /* Mark queue number as free */ @@ -1766,6 +1808,11 @@ static int hermon_complete ( struct ib_device *ibdev, if ( is_send ) { /* Hand off to completion handler */ ib_complete_send ( ibdev, qp, iobuf, rc ); + } else if ( rc != 0 ) { + /* Dump queue state (for debugging) */ + hermon_dump_qpctx ( hermon, qp ); + /* Hand off to completion handler */ + ib_complete_recv ( ibdev, qp, NULL, NULL, iobuf, rc ); } else { /* Set received length */ len = MLX_GET ( &cqe->normal, byte_cnt ); @@ -1808,7 +1855,7 @@ static int hermon_complete ( struct ib_device *ibdev, assert ( len <= iob_tailroom ( iobuf ) ); iob_put ( iobuf, len ); /* Hand off to completion handler */ - ib_complete_recv ( ibdev, qp, &recv_dest, source, iobuf, rc ); + ib_complete_recv ( ibdev, qp, &recv_dest, source, iobuf, 0 ); } return rc; @@ -1862,6 +1909,89 @@ static void hermon_poll_cq ( struct ib_device *ibdev, *************************************************************************** */ +/** + * Dump event queue context (for debugging only) + * + * @v hermon Hermon device + * @v hermon_eq Event queue + * @ret rc Return status code + */ +static __attribute__ (( unused )) int +hermon_dump_eqctx ( struct hermon *hermon, + struct hermon_event_queue *hermon_eq ) { + struct hermonprm_eqc eqctx; + int rc; + + /* Do nothing unless debugging is enabled */ + if ( ! 
DBG_LOG ) + return 0; + + /* Dump event queue context */ + memset ( &eqctx, 0, sizeof ( eqctx ) ); + if ( ( rc = hermon_cmd_query_eq ( hermon, hermon_eq->eqn, + &eqctx ) ) != 0 ) { + DBGC ( hermon, "Hermon %p EQN %#lx QUERY_EQ failed: %s\n", + hermon, hermon_eq->eqn, strerror ( rc ) ); + return rc; + } + DBGC ( hermon, "Hermon %p EQN %#lx context:\n", + hermon, hermon_eq->eqn ); + DBGC_HDA ( hermon, 0, &eqctx, sizeof ( eqctx ) ); + + return 0; +} + +/** + * Dump unconsumed event queue entries (for debugging only) + * + * @v hermon Hermon device + * @v hermon_eq Event queue + * @ret rc Return status code + */ +static __attribute__ (( unused )) int +hermon_dump_eqes ( struct hermon *hermon, + struct hermon_event_queue *hermon_eq ) { + struct hermonprm_eqc eqctx; + union hermonprm_event_entry *eqe; + unsigned int mask; + unsigned int prod; + unsigned int cons; + unsigned int idx; + int rc; + + /* Do nothing unless debugging is enabled */ + if ( ! DBG_LOG ) + return 0; + + /* Dump event queue entries */ + memset ( &eqctx, 0, sizeof ( eqctx ) ); + if ( ( rc = hermon_cmd_query_eq ( hermon, hermon_eq->eqn, + &eqctx ) ) != 0 ) { + DBGC ( hermon, "Hermon %p EQN %#lx QUERY_EQ failed: %s\n", + hermon, hermon_eq->eqn, strerror ( rc ) ); + return rc; + } + mask = ( HERMON_NUM_EQES - 1 ); + prod = MLX_GET ( &eqctx, producer_counter ) & mask; + cons = MLX_GET ( &eqctx, consumer_counter ) & mask; + idx = hermon_eq->next_idx; + if ( ( idx & mask ) != ( cons & mask ) ) { + DBGC ( hermon, "Hermon %p EQN %#lx mismatch: SW %#x != HW " + "%#x\n", hermon, hermon_eq->eqn, idx, cons ); + } + for ( ; ( idx & mask ) != ( prod & mask ) ; idx++ ) { + eqe = &hermon_eq->eqe[idx & mask]; + DBGC ( hermon, "Hermon %p EQN %#lx event %#x owner %d type " + "%#02x:%#02x\n", hermon, hermon_eq->eqn, idx, + MLX_GET ( &eqe->generic, owner ), + MLX_GET ( &eqe->generic, event_type ), + MLX_GET ( &eqe->generic, event_sub_type ) ); + DBGC_HDA ( hermon, 0, eqe, sizeof ( *eqe ) ); + } + + return 0; +} + /** * Create event queue * @@ -1887,9 +2017,11 @@ static int hermon_create_eq ( struct hermon *hermon ) { /* Allocate event queue itself */ hermon_eq->eqe_size = ( HERMON_NUM_EQES * sizeof ( hermon_eq->eqe[0] ) ); - hermon_eq->eqe = malloc_dma ( hermon_eq->eqe_size, - sizeof ( hermon_eq->eqe[0] ) ); + hermon_eq->eqe = malloc_phys ( hermon_eq->eqe_size, + sizeof ( hermon_eq->eqe[0] ) ); if ( ! 
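
(Illustrative aside, not part of the patch: the new hermon_dump_eqes() walk relies on HERMON_NUM_EQES being a power of two, so masking with ( HERMON_NUM_EQES - 1 ) is equivalent to taking an index modulo the ring size when comparing the software index against the hardware producer/consumer counters. A compressed sketch of that wrap-around handling:)

/* Power-of-two ring: ( idx & mask ) == ( idx % HERMON_NUM_EQES ) */
mask = ( HERMON_NUM_EQES - 1 );
prod = MLX_GET ( &eqctx, producer_counter ) & mask;
cons = MLX_GET ( &eqctx, consumer_counter ) & mask;
for ( idx = cons ; ( idx & mask ) != prod ; idx++ ) {
        eqe = &hermon_eq->eqe[ idx & mask ];    /* entry not yet consumed by software */
        /* ... dump eqe ... */
}
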
hermon_eq->eqe ) { + DBGC ( hermon, "Hermon %p EQN %#lx could not allocate EQEs\n", + hermon, hermon_eq->eqn ); rc = -ENOMEM; goto err_eqe; } @@ -1902,8 +2034,11 @@ static int hermon_create_eq ( struct hermon *hermon ) { /* Allocate MTT entries */ if ( ( rc = hermon_alloc_mtt ( hermon, hermon_eq->eqe, hermon_eq->eqe_size, - &hermon_eq->mtt ) ) != 0 ) + &hermon_eq->mtt ) ) != 0 ) { + DBGC ( hermon, "Hermon %p EQN %#lx could not allocate MTTs: " + "%s\n", hermon, hermon_eq->eqn, strerror ( rc ) ); goto err_alloc_mtt; + } /* Hand queue over to hardware */ memset ( &eqctx, 0, sizeof ( eqctx ) ); @@ -1946,7 +2081,7 @@ static int hermon_create_eq ( struct hermon *hermon ) { err_sw2hw_eq: hermon_free_mtt ( hermon, &hermon_eq->mtt ); err_alloc_mtt: - free_dma ( hermon_eq->eqe, hermon_eq->eqe_size ); + free_phys ( hermon_eq->eqe, hermon_eq->eqe_size ); err_eqe: memset ( hermon_eq, 0, sizeof ( *hermon_eq ) ); return rc; @@ -1986,7 +2121,7 @@ static void hermon_destroy_eq ( struct hermon *hermon ) { hermon_free_mtt ( hermon, &hermon_eq->mtt ); /* Free memory */ - free_dma ( hermon_eq->eqe, hermon_eq->eqe_size ); + free_phys ( hermon_eq->eqe, hermon_eq->eqe_size ); memset ( hermon_eq, 0, sizeof ( *hermon_eq ) ); } @@ -2019,6 +2154,32 @@ static void hermon_event_port_state_change ( struct hermon *hermon, link_up ); } +/** + * Handle port management event + * + * @v hermon Hermon device + * @v eqe Port management change event queue entry + */ +static void hermon_event_port_mgmnt_change ( struct hermon *hermon, + union hermonprm_event_entry *eqe){ + unsigned int port; + + /* Get port */ + port = ( MLX_GET ( &eqe->port_mgmnt_change, port ) - 1 ); + DBGC ( hermon, "Hermon %p port %d management change\n", + hermon, ( port + 1 ) ); + + /* Sanity check */ + if ( port >= hermon->cap.num_ports ) { + DBGC ( hermon, "Hermon %p port %d does not exist!\n", + hermon, ( port + 1 ) ); + return; + } + + /* Update MAD parameters */ + ib_smc_update ( hermon->port[port].ibdev, hermon_mad ); +} + /** * Poll event queue * @@ -2029,6 +2190,8 @@ static void hermon_poll_eq ( struct ib_device *ibdev ) { struct hermon_event_queue *hermon_eq = &hermon->eq; union hermonprm_event_entry *eqe; union hermonprm_doorbell_register db_reg; + unsigned long now; + unsigned long elapsed; unsigned int eqe_idx_mask; unsigned int event_type; @@ -2037,7 +2200,12 @@ static void hermon_poll_eq ( struct ib_device *ibdev ) { */ if ( ib_is_open ( ibdev ) && ( ibdev->port_state == IB_PORT_STATE_DOWN ) ) { - ib_smc_update ( ibdev, hermon_mad ); + now = currticks(); + elapsed = ( now - hermon->last_poll ); + if ( elapsed >= HERMON_LINK_POLL_INTERVAL ) { + hermon->last_poll = now; + ib_smc_update ( ibdev, hermon_mad ); + } } /* Poll event queue */ @@ -2061,10 +2229,14 @@ static void hermon_poll_eq ( struct ib_device *ibdev ) { case HERMON_EV_PORT_STATE_CHANGE: hermon_event_port_state_change ( hermon, eqe ); break; + case HERMON_EV_PORT_MGMNT_CHANGE: + hermon_event_port_mgmnt_change ( hermon, eqe ); + break; default: DBGC ( hermon, "Hermon %p EQN %#lx unrecognised event " - "type %#x:\n", - hermon, hermon_eq->eqn, event_type ); + "type %#02x:%#02x\n", + hermon, hermon_eq->eqn, event_type, + MLX_GET ( &eqe->generic, event_sub_type ) ); DBGC_HDA ( hermon, virt_to_phys ( eqe ), eqe, sizeof ( *eqe ) ); break; @@ -2202,6 +2374,8 @@ static int hermon_start_firmware ( struct hermon *hermon ) { hermon->firmware_len = fw_len; hermon->firmware_area = umalloc ( hermon->firmware_len ); if ( ! 
hermon->firmware_area ) { + DBGC ( hermon, "Hermon %p could not allocate firmware " + "area\n", hermon ); rc = -ENOMEM; goto err_alloc_fa; } @@ -2569,6 +2743,8 @@ static int hermon_map_icm ( struct hermon *hermon, hermon->icm_aux_len = icm_aux_len; hermon->icm = umalloc ( hermon->icm_aux_len + hermon->icm_len ); if ( ! hermon->icm ) { + DBGC ( hermon, "Hermon %p could not allocate ICM\n", + hermon ); rc = -ENOMEM; goto err_alloc; } @@ -2650,22 +2826,45 @@ static void hermon_unmap_icm ( struct hermon *hermon ) { * Reset device * * @v hermon Hermon device + * @ret rc Return status code */ -static void hermon_reset ( struct hermon *hermon ) { +static int hermon_reset ( struct hermon *hermon ) { struct pci_device *pci = hermon->pci; struct pci_config_backup backup; static const uint8_t backup_exclude[] = PCI_CONFIG_BACKUP_EXCLUDE ( 0x58, 0x5c ); + uint16_t vendor; + unsigned int i; + + /* Reset command interface toggle */ + hermon->toggle = 0; /* Perform device reset and preserve PCI configuration */ pci_backup ( pci, &backup, backup_exclude ); writel ( HERMON_RESET_MAGIC, ( hermon->config + HERMON_RESET_OFFSET ) ); - mdelay ( HERMON_RESET_WAIT_TIME_MS ); - pci_restore ( pci, &backup, backup_exclude ); - /* Reset command interface toggle */ - hermon->toggle = 0; + /* Wait until device starts responding to configuration cycles */ + for ( i = 0 ; i < HERMON_RESET_MAX_WAIT_MS ; i++ ) { + + /* Read PCI vendor ID */ + pci_read_config_word ( pci, PCI_VENDOR_ID, &vendor ); + if ( vendor == pci->vendor ) { + + /* Restore PCI configuration */ + pci_restore ( pci, &backup, backup_exclude ); + + DBGC ( hermon, "Hermon %p reset after %dms\n", + hermon, i ); + return 0; + } + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( hermon, "Hermon %p timed out waiting for reset\n", hermon ); + return -ETIMEDOUT; } /** @@ -2708,6 +2907,25 @@ static int hermon_setup_mpt ( struct hermon *hermon ) { return 0; } +/** + * Unmap memory protection table + * + * @v hermon Hermon device + * @ret rc Return status code + */ +static int hermon_unmap_mpt ( struct hermon *hermon ) { + int rc; + + if ( ( rc = hermon_cmd_hw2sw_mpt ( hermon, + hermon->cap.reserved_mrws ) ) != 0 ){ + DBGC ( hermon, "Hermon %p could not unmap MPT: %s\n", + hermon, strerror ( rc ) ); + return rc; + } + + return 0; +} + /** * Configure special queue pairs * @@ -2784,11 +3002,13 @@ static int hermon_start ( struct hermon *hermon, int running ) { if ( ( rc = hermon_configure_special_qps ( hermon ) ) != 0 ) goto err_conf_special_qps; + DBGC ( hermon, "Hermon %p device started\n", hermon ); return 0; err_conf_special_qps: hermon_destroy_eq ( hermon ); err_create_eq: + hermon_unmap_mpt ( hermon ); err_setup_mpt: hermon_cmd_close_hca ( hermon ); err_init_hca: @@ -2806,6 +3026,7 @@ static int hermon_start ( struct hermon *hermon, int running ) { */ static void hermon_stop ( struct hermon *hermon ) { hermon_destroy_eq ( hermon ); + hermon_unmap_mpt ( hermon ); hermon_cmd_close_hca ( hermon ); hermon_unmap_icm ( hermon ); hermon_stop_firmware ( hermon ); @@ -3080,6 +3301,9 @@ static int hermon_register_ibdev ( struct hermon *hermon, struct ib_device *ibdev = port->ibdev; int rc; + /* Use Ethernet MAC as eIPoIB local EMAC */ + memcpy ( ibdev->lemac, port->eth_mac.raw, ETH_ALEN ); + /* Initialise parameters using SMC */ ib_smc_init ( ibdev, hermon_mad ); @@ -3137,13 +3361,13 @@ static struct hermon_port_type hermon_port_type_ib = { */ /** Number of Hermon Ethernet send work queue entries */ -#define HERMON_ETH_NUM_SEND_WQES 2 +#define HERMON_ETH_NUM_SEND_WQES 16 /** 
Number of Hermon Ethernet receive work queue entries */ -#define HERMON_ETH_NUM_RECV_WQES 4 +#define HERMON_ETH_NUM_RECV_WQES 8 /** Number of Hermon Ethernet completion entries */ -#define HERMON_ETH_NUM_CQES 8 +#define HERMON_ETH_NUM_CQES 32 /** * Transmit packet via Hermon Ethernet device @@ -3396,24 +3620,10 @@ static int hermon_register_netdev ( struct hermon *hermon, struct hermon_port *port ) { struct net_device *netdev = port->netdev; struct ib_device *ibdev = port->ibdev; - struct hermonprm_query_port_cap query_port; - union { - uint8_t bytes[8]; - uint32_t dwords[2]; - } mac; int rc; - /* Retrieve MAC address */ - if ( ( rc = hermon_cmd_query_port ( hermon, ibdev->port, - &query_port ) ) != 0 ) { - DBGC ( hermon, "Hermon %p port %d could not query port: %s\n", - hermon, ibdev->port, strerror ( rc ) ); - goto err_query_port; - } - mac.dwords[0] = htonl ( MLX_GET ( &query_port, mac_47_32 ) ); - mac.dwords[1] = htonl ( MLX_GET ( &query_port, mac_31_0 ) ); - memcpy ( netdev->hw_addr, - &mac.bytes[ sizeof ( mac.bytes ) - ETH_ALEN ], ETH_ALEN ); + /* Set MAC address */ + memcpy ( netdev->hw_addr, port->eth_mac.raw, ETH_ALEN ); /* Register network device */ if ( ( rc = register_netdev ( netdev ) ) != 0 ) { @@ -3437,7 +3647,6 @@ static int hermon_register_netdev ( struct hermon *hermon, err_register_nvo: unregister_netdev ( netdev ); err_register_netdev: - err_query_port: return rc; } @@ -3575,6 +3784,10 @@ static int hermon_set_port_type ( struct hermon *hermon, ( ( ib_supported && eth_supported ) ? " and" : "" ), ( eth_supported ? " Ethernet" : "" ) ); + /* Record Ethernet MAC address */ + port->eth_mac.part.h = htons ( MLX_GET ( &query_port, mac_47_32 ) ); + port->eth_mac.part.l = htonl ( MLX_GET ( &query_port, mac_31_0 ) ); + /* Sense network, if applicable */ if ( ib_supported && eth_supported ) { @@ -3587,6 +3800,9 @@ static int hermon_set_port_type ( struct hermon *hermon, rc = port_type; return rc; } + + /* Avoid spamming debug output */ + mdelay ( 50 ); } while ( ( port_type == HERMON_PORT_TYPE_UNKNOWN ) && ( ( elapsed = ( currticks() - start ) ) < HERMON_SENSE_PORT_TIMEOUT ) ); @@ -3736,20 +3952,20 @@ static struct hermon * hermon_alloc ( void ) { goto err_hermon; /* Allocate space for mailboxes */ - hermon->mailbox_in = malloc_dma ( HERMON_MBOX_SIZE, - HERMON_MBOX_ALIGN ); + hermon->mailbox_in = malloc_phys ( HERMON_MBOX_SIZE, + HERMON_MBOX_ALIGN ); if ( ! hermon->mailbox_in ) goto err_mailbox_in; - hermon->mailbox_out = malloc_dma ( HERMON_MBOX_SIZE, - HERMON_MBOX_ALIGN ); + hermon->mailbox_out = malloc_phys ( HERMON_MBOX_SIZE, + HERMON_MBOX_ALIGN ); if ( ! 
hermon->mailbox_out ) goto err_mailbox_out; return hermon; - free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE ); + free_phys ( hermon->mailbox_out, HERMON_MBOX_SIZE ); err_mailbox_out: - free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE ); + free_phys ( hermon->mailbox_in, HERMON_MBOX_SIZE ); err_mailbox_in: free ( hermon ); err_hermon: @@ -3765,8 +3981,8 @@ static void hermon_free ( struct hermon *hermon ) { ufree ( hermon->icm ); ufree ( hermon->firmware_area ); - free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE ); - free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE ); + free_phys ( hermon->mailbox_out, HERMON_MBOX_SIZE ); + free_phys ( hermon->mailbox_in, HERMON_MBOX_SIZE ); free ( hermon ); } @@ -3782,6 +3998,8 @@ static int hermon_probe ( struct pci_device *pci ) { struct ib_device *ibdev; struct net_device *netdev; struct hermon_port *port; + unsigned long config; + unsigned long uar; unsigned int i; int rc; @@ -3798,13 +4016,16 @@ static int hermon_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map PCI BARs */ - hermon->config = ioremap ( pci_bar_start ( pci, HERMON_PCI_CONFIG_BAR ), - HERMON_PCI_CONFIG_BAR_SIZE ); - hermon->uar = ioremap ( pci_bar_start ( pci, HERMON_PCI_UAR_BAR ), - HERMON_UAR_NON_EQ_PAGE * HERMON_PAGE_SIZE ); + config = pci_bar_start ( pci, HERMON_PCI_CONFIG_BAR ); + hermon->config = pci_ioremap ( pci, config, + HERMON_PCI_CONFIG_BAR_SIZE ); + uar = pci_bar_start ( pci, HERMON_PCI_UAR_BAR ); + hermon->uar = pci_ioremap ( pci, uar, + HERMON_UAR_NON_EQ_PAGE * HERMON_PAGE_SIZE ); /* Reset device */ - hermon_reset ( hermon ); + if ( ( rc = hermon_reset ( hermon ) ) != 0 ) + goto err_reset; /* Start firmware */ if ( ( rc = hermon_start_firmware ( hermon ) ) != 0 ) @@ -3816,7 +4037,7 @@ static int hermon_probe ( struct pci_device *pci ) { /* Allocate Infiniband devices */ for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) { - ibdev = alloc_ibdev ( 0 ); + ibdev = alloc_ibdev ( 0 ); if ( ! 
ibdev ) { rc = -ENOMEM; goto err_alloc_ibdev; @@ -3825,6 +4046,7 @@ static int hermon_probe ( struct pci_device *pci ) { ibdev->op = &hermon_ib_operations; ibdev->dev = &pci->dev; ibdev->port = ( HERMON_PORT_BASE + i ); + ibdev->ports = hermon->cap.num_ports; ib_set_drvdata ( ibdev, hermon ); } @@ -3896,6 +4118,7 @@ static int hermon_probe ( struct pci_device *pci ) { err_get_cap: hermon_stop_firmware ( hermon ); err_start_firmware: + err_reset: iounmap ( hermon->uar ); iounmap ( hermon->config ); hermon_free ( hermon ); @@ -3937,6 +4160,7 @@ static void hermon_remove ( struct pci_device *pci ) { */ static int hermon_bofm_probe ( struct pci_device *pci ) { struct hermon *hermon; + unsigned long config; int rc; /* Allocate Hermon device */ @@ -3952,8 +4176,9 @@ static int hermon_bofm_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map PCI BAR */ - hermon->config = ioremap ( pci_bar_start ( pci, HERMON_PCI_CONFIG_BAR ), - HERMON_PCI_CONFIG_BAR_SIZE ); + config = pci_bar_start ( pci, HERMON_PCI_CONFIG_BAR ); + hermon->config = pci_ioremap ( pci, config, + HERMON_PCI_CONFIG_BAR_SIZE ); /* Initialise BOFM device */ bofm_init ( &hermon->bofm, pci, &hermon_bofm_operations ); @@ -3988,18 +4213,29 @@ static void hermon_bofm_remove ( struct pci_device *pci ) { } static struct pci_device_id hermon_nics[] = { + /* Mellanox ConnectX VPI (ethernet + infiniband) */ PCI_ROM ( 0x15b3, 0x6340, "mt25408", "MT25408 HCA driver", 0 ), PCI_ROM ( 0x15b3, 0x634a, "mt25418", "MT25418 HCA driver", 0 ), + + /* Mellanox ConnectX EN (ethernet only) */ + PCI_ROM ( 0x15b3, 0x6368, "mt25448", "MT25448 HCA driver", 0 ), + PCI_ROM ( 0x15b3, 0x6372, "mt25458", "MT25458 HCA driver", 0 ), + + /* Mellanox ConnectX-2 VPI (ethernet + infiniband) */ PCI_ROM ( 0x15b3, 0x6732, "mt26418", "MT26418 HCA driver", 0 ), PCI_ROM ( 0x15b3, 0x673c, "mt26428", "MT26428 HCA driver", 0 ), PCI_ROM ( 0x15b3, 0x6746, "mt26438", "MT26438 HCA driver", 0 ), PCI_ROM ( 0x15b3, 0x6778, "mt26488", "MT26488 HCA driver", 0 ), - PCI_ROM ( 0x15b3, 0x6368, "mt25448", "MT25448 HCA driver", 0 ), + + /* Mellanox ConnectX-2 EN (ethernet only) */ PCI_ROM ( 0x15b3, 0x6750, "mt26448", "MT26448 HCA driver", 0 ), - PCI_ROM ( 0x15b3, 0x6372, "mt25458", "MT25458 HCA driver", 0 ), PCI_ROM ( 0x15b3, 0x675a, "mt26458", "MT26458 HCA driver", 0 ), PCI_ROM ( 0x15b3, 0x6764, "mt26468", "MT26468 HCA driver", 0 ), PCI_ROM ( 0x15b3, 0x676e, "mt26478", "MT26478 HCA driver", 0 ), + + /* Mellanox ConnectX-3 VPI (ethernet + infiniband) */ + PCI_ROM ( 0x15b3, 0x1003, "mt4099", "ConnectX-3 HCA driver", 0 ), + PCI_ROM ( 0x15b3, 0x1007, "mt4103", "ConnectX-3 Pro HCA driver", 0 ), }; struct pci_driver hermon_driver __pci_driver = { diff --git a/src/drivers/infiniband/hermon.h b/src/drivers/infiniband/hermon.h index 61e285781..a952bbd81 100644 --- a/src/drivers/infiniband/hermon.h +++ b/src/drivers/infiniband/hermon.h @@ -34,8 +34,8 @@ FILE_LICENCE ( GPL2_OR_LATER ); /* Device reset */ #define HERMON_RESET_OFFSET 0x0f0010 -#define HERMON_RESET_MAGIC 0x01000000UL -#define HERMON_RESET_WAIT_TIME_MS 1000 +#define HERMON_RESET_MAGIC 0x01000001UL +#define HERMON_RESET_MAX_WAIT_MS 1000 /* Work queue entry and completion queue entry opcodes */ #define HERMON_OPCODE_NOP 0x00 @@ -52,6 +52,7 @@ FILE_LICENCE ( GPL2_OR_LATER ); #define HERMON_HCR_CLOSE_PORT 0x000a #define HERMON_HCR_SET_PORT 0x000c #define HERMON_HCR_SW2HW_MPT 0x000d +#define HERMON_HCR_HW2SW_MPT 0x000f #define HERMON_HCR_WRITE_MTT 0x0011 #define HERMON_HCR_MAP_EQ 0x0012 #define HERMON_HCR_SW2HW_EQ 0x0013 @@ -122,6 
+123,7 @@ FILE_LICENCE ( GPL2_OR_LATER ); #define HERMON_SET_PORT_GID_TABLE 0x0500 #define HERMON_EV_PORT_STATE_CHANGE 0x09 +#define HERMON_EV_PORT_MGMNT_CHANGE 0x1d #define HERMON_SCHED_QP0 0x3f #define HERMON_SCHED_DEFAULT 0x83 @@ -216,6 +218,13 @@ struct hermonprm_port_state_change_event_st { struct hermonprm_port_state_change_st data; } __attribute__ (( packed )); +struct hermonprm_port_mgmnt_change_event_st { + pseudo_bit_t reserved[0x00020]; +/* -------------- */ + pseudo_bit_t port[0x00008]; + pseudo_bit_t reserved0[0x00018]; +} __attribute__ (( packed )); + struct hermonprm_sense_port_st { pseudo_bit_t reserved0[0x00020]; /* -------------- */ @@ -459,6 +468,7 @@ struct MLX_DECLARE_STRUCT ( hermonprm_mod_stat_cfg_input_mod ); struct MLX_DECLARE_STRUCT ( hermonprm_mpt ); struct MLX_DECLARE_STRUCT ( hermonprm_mtt ); struct MLX_DECLARE_STRUCT ( hermonprm_port_state_change_event ); +struct MLX_DECLARE_STRUCT ( hermonprm_port_mgmnt_change_event ); struct MLX_DECLARE_STRUCT ( hermonprm_qp_db_record ); struct MLX_DECLARE_STRUCT ( hermonprm_qp_ee_state_transitions ); struct MLX_DECLARE_STRUCT ( hermonprm_query_dev_cap ); @@ -529,6 +539,7 @@ union hermonprm_completion_entry { union hermonprm_event_entry { struct hermonprm_event_queue_entry generic; struct hermonprm_port_state_change_event port_state_change; + struct hermonprm_port_mgmnt_change_event port_mgmnt_change; } __attribute__ (( packed )); union hermonprm_doorbell_register { @@ -822,6 +833,15 @@ struct hermon_port_type { struct hermon_port *port ); }; +/** A Hermon port Ethernet MAC address */ +union hermon_port_mac { + struct { + uint16_t h; + uint32_t l; + } __attribute__ (( packed )) part; + uint8_t raw[ETH_ALEN]; +}; + /** A Hermon port */ struct hermon_port { /** Infiniband device */ @@ -832,6 +852,8 @@ struct hermon_port { struct ib_completion_queue *eth_cq; /** Ethernet queue pair */ struct ib_queue_pair *eth_qp; + /** Ethernet MAC */ + union hermon_port_mac eth_mac; /** Port type */ struct hermon_port_type *type; /** Non-volatile option storage */ @@ -882,6 +904,8 @@ struct hermon { /** Event queue */ struct hermon_event_queue eq; + /** Last unsolicited link state poll */ + unsigned long last_poll; /** Unrestricted LKey * * Used to get unrestricted memory access. @@ -918,6 +942,13 @@ struct hermon { /** Memory key prefix */ #define HERMON_MKEY_PREFIX 0x77000000UL +/** Link poll interval + * + * Used when we need to poll for link state (rather than relying upon + * receiving an event). + */ +#define HERMON_LINK_POLL_INTERVAL ( TICKS_PER_SEC / 2 ) + /* * HCA commands * @@ -925,7 +956,7 @@ struct hermon { #define HERMON_HCR_BASE 0x80680 #define HERMON_HCR_REG(x) ( HERMON_HCR_BASE + 4 * (x) ) -#define HERMON_HCR_MAX_WAIT_MS 2000 +#define HERMON_HCR_MAX_WAIT_MS 10000 #define HERMON_MBOX_ALIGN 4096 #define HERMON_MBOX_SIZE 1024 diff --git a/src/drivers/infiniband/linda.c b/src/drivers/infiniband/linda.c index e8d61c865..8c5912660 100644 --- a/src/drivers/infiniband/linda.c +++ b/src/drivers/infiniband/linda.c @@ -531,8 +531,8 @@ static int linda_init_send ( struct linda *linda ) { linda->send_buf[i] = i; /* Allocate space for the SendBufAvail array */ - linda->sendbufavail = malloc_dma ( sizeof ( *linda->sendbufavail ), - LINDA_SENDBUFAVAIL_ALIGN ); + linda->sendbufavail = malloc_phys ( sizeof ( *linda->sendbufavail ), + LINDA_SENDBUFAVAIL_ALIGN ); if ( ! 
linda->sendbufavail ) { rc = -ENOMEM; goto err_alloc_sendbufavail; @@ -555,7 +555,7 @@ static int linda_init_send ( struct linda *linda ) { return 0; - free_dma ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) ); + free_phys ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) ); err_alloc_sendbufavail: return rc; } @@ -576,7 +576,7 @@ static void linda_fini_send ( struct linda *linda ) { /* Ensure hardware has seen this disable */ linda_readq ( linda, &sendctrl, QIB_7220_SendCtrl_offset ); - free_dma ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) ); + free_phys ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) ); } /*************************************************************************** @@ -613,8 +613,8 @@ static int linda_create_recv_wq ( struct linda *linda, linda_wq->eager_cons = 0; /* Allocate receive header buffer */ - linda_wq->header = malloc_dma ( LINDA_RECV_HEADERS_SIZE, - LINDA_RECV_HEADERS_ALIGN ); + linda_wq->header = malloc_phys ( LINDA_RECV_HEADERS_SIZE, + LINDA_RECV_HEADERS_ALIGN ); if ( ! linda_wq->header ) { rc = -ENOMEM; goto err_alloc_header; @@ -650,7 +650,7 @@ static int linda_create_recv_wq ( struct linda *linda, virt_to_bus ( &linda_wq->header_prod ) ); return 0; - free_dma ( linda_wq->header, LINDA_RECV_HEADERS_SIZE ); + free_phys ( linda_wq->header, LINDA_RECV_HEADERS_SIZE ); err_alloc_header: return rc; } @@ -679,7 +679,7 @@ static void linda_destroy_recv_wq ( struct linda *linda, mb(); /* Free headers ring */ - free_dma ( linda_wq->header, LINDA_RECV_HEADERS_SIZE ); + free_phys ( linda_wq->header, LINDA_RECV_HEADERS_SIZE ); /* Free context */ linda_free_ctx ( linda, ctx ); @@ -2330,12 +2330,13 @@ static int linda_probe ( struct pci_device *pci ) { ibdev->op = &linda_ib_operations; ibdev->dev = &pci->dev; ibdev->port = 1; + ibdev->ports = 1; /* Fix up PCI device */ adjust_pci_device ( pci ); /* Map PCI BARs */ - linda->regs = ioremap ( pci->membase, LINDA_BAR0_SIZE ); + linda->regs = pci_ioremap ( pci, pci->membase, LINDA_BAR0_SIZE ); DBGC2 ( linda, "Linda %p has BAR at %08lx\n", linda, pci->membase ); /* Print some general data */ diff --git a/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_memory_priv.c b/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_memory_priv.c index cb9e759bf..e368d459b 100644 --- a/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_memory_priv.c +++ b/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_memory_priv.c @@ -61,7 +61,7 @@ mlx_memory_alloc_dma_priv( ) { mlx_status status = MLX_SUCCESS; - *ptr = malloc_dma(size, align); + *ptr = malloc_phys(size, align); if (*ptr == NULL) { status = MLX_OUT_OF_RESOURCES; } else { @@ -78,7 +78,7 @@ mlx_memory_free_dma_priv( ) { mlx_status status = MLX_SUCCESS; - free_dma(ptr, size); + free_phys(ptr, size); return status; } mlx_status diff --git a/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_pci_priv.c b/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_pci_priv.c index b474a4a63..6b42bcafc 100644 --- a/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_pci_priv.c +++ b/src/drivers/infiniband/mlx_utils_flexboot/src/mlx_pci_priv.c @@ -115,7 +115,7 @@ mlx_pci_init_priv( mlx_status status = MLX_SUCCESS; adjust_pci_device ( utils->pci ); #ifdef DEVICE_CX3 - utils->config = ioremap ( pci_bar_start ( utils->pci, PCI_BASE_ADDRESS_0), + utils->config = pci_ioremap ( utils->pci, pci_bar_start ( utils->pci, PCI_BASE_ADDRESS_0), 0x100000 ); #endif return status; diff --git a/src/drivers/infiniband/nodnic_prm.h b/src/drivers/infiniband/nodnic_prm.h index 5e0fa9890..a962c821c 
100644 --- a/src/drivers/infiniband/nodnic_prm.h +++ b/src/drivers/infiniband/nodnic_prm.h @@ -38,10 +38,10 @@ struct nodnic_wqe_segment_data_ptr_st { /* Little Endian */ struct MLX_DECLARE_STRUCT ( nodnic_wqe_segment_data_ptr ); -#define HERMON_MAX_SCATTER 1 +#define NODNIC_MAX_SCATTER 1 struct nodnic_recv_wqe { - struct nodnic_wqe_segment_data_ptr data[HERMON_MAX_SCATTER]; + struct nodnic_wqe_segment_data_ptr data[NODNIC_MAX_SCATTER]; } __attribute__ (( packed )); #endif /* SRC_DRIVERS_INFINIBAND_MLX_NODNIC_INCLUDE_PRM_NODNIC_PRM_H_ */ diff --git a/src/drivers/infiniband/qib7322.c b/src/drivers/infiniband/qib7322.c index 18011c19a..a4b51db05 100644 --- a/src/drivers/infiniband/qib7322.c +++ b/src/drivers/infiniband/qib7322.c @@ -669,8 +669,8 @@ static int qib7322_init_send ( struct qib7322 *qib7322 ) { } /* Allocate space for the SendBufAvail array */ - qib7322->sendbufavail = malloc_dma ( sizeof ( *qib7322->sendbufavail ), - QIB7322_SENDBUFAVAIL_ALIGN ); + qib7322->sendbufavail = malloc_phys ( sizeof ( *qib7322->sendbufavail ), + QIB7322_SENDBUFAVAIL_ALIGN ); if ( ! qib7322->sendbufavail ) { rc = -ENOMEM; goto err_alloc_sendbufavail; @@ -697,7 +697,7 @@ static int qib7322_init_send ( struct qib7322 *qib7322 ) { return 0; - free_dma ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) ); + free_phys ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) ); err_alloc_sendbufavail: qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port1 ); err_create_send_bufs_vl15_port1: @@ -724,7 +724,7 @@ static void qib7322_fini_send ( struct qib7322 *qib7322 ) { /* Ensure hardware has seen this disable */ qib7322_readq ( qib7322, &sendctrl, QIB_7322_SendCtrl_offset ); - free_dma ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) ); + free_phys ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) ); qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port1 ); qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port0 ); qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_small ); @@ -767,8 +767,8 @@ static int qib7322_create_recv_wq ( struct ib_device *ibdev, qib7322_wq->eager_cons = 0; /* Allocate receive header buffer */ - qib7322_wq->header = malloc_dma ( QIB7322_RECV_HEADERS_SIZE, - QIB7322_RECV_HEADERS_ALIGN ); + qib7322_wq->header = malloc_phys ( QIB7322_RECV_HEADERS_SIZE, + QIB7322_RECV_HEADERS_ALIGN ); if ( ! 
qib7322_wq->header ) { rc = -ENOMEM; goto err_alloc_header; @@ -810,7 +810,7 @@ static int qib7322_create_recv_wq ( struct ib_device *ibdev, virt_to_bus ( &qib7322_wq->header_prod ) ); return 0; - free_dma ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE ); + free_phys ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE ); err_alloc_header: return rc; } @@ -846,7 +846,7 @@ static void qib7322_destroy_recv_wq ( struct ib_device *ibdev, mb(); /* Free headers ring */ - free_dma ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE ); + free_phys ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE ); } /** @@ -2297,7 +2297,7 @@ static int qib7322_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map PCI BARs */ - qib7322->regs = ioremap ( pci->membase, QIB7322_BAR0_SIZE ); + qib7322->regs = pci_ioremap ( pci, pci->membase, QIB7322_BAR0_SIZE ); DBGC2 ( qib7322, "QIB7322 %p has BAR at %08lx\n", qib7322, pci->membase ); @@ -2348,6 +2348,7 @@ static int qib7322_probe ( struct pci_device *pci ) { ibdev->dev = &pci->dev; ibdev->op = &qib7322_ib_operations; ibdev->port = ( QIB7322_PORT_BASE + i ); + ibdev->ports = QIB7322_MAX_PORTS; ibdev->link_width_enabled = ibdev->link_width_supported = IB_LINK_WIDTH_4X; /* 1x does not work */ ibdev->link_speed_enabled = ibdev->link_speed_supported = diff --git a/src/drivers/linux/af_packet.c b/src/drivers/linux/af_packet.c index 65aafc5b1..9fa6ef2a5 100644 --- a/src/drivers/linux/af_packet.c +++ b/src/drivers/linux/af_packet.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/drivers/linux/linux.c b/src/drivers/linux/linux.c index 83546b27e..898f50024 100644 --- a/src/drivers/linux/linux.c +++ b/src/drivers/linux/linux.c @@ -130,24 +130,48 @@ struct linux_setting *linux_find_setting(char *name, struct list_head *settings) return result; } -void linux_apply_settings(struct list_head *new_settings, struct settings *settings_block) -{ - struct linux_setting *setting; +/** + * Apply Linux command-line settings + * + * @v list List of command-line settings + * @v settings Settings block + */ +void linux_apply_settings ( struct list_head *list, + struct settings *settings ) { + struct linux_setting *lsetting; + struct settings *ignore; + struct setting setting; int rc; - list_for_each_entry(setting, new_settings, list) { + list_for_each_entry ( lsetting, list, list ) { + /* Skip already applied settings */ - if (setting->applied) + if ( lsetting->applied ) continue; - struct setting *s = find_setting(setting->name); - if (s) { - rc = storef_setting(settings_block, find_setting(setting->name), setting->value); - if (rc != 0) - DBG("linux storing setting '%s' = '%s' failed\n", setting->name, setting->value); - setting->applied = 1; - } else { - DBG("linux unknown setting '%s'\n", setting->name); + /* Parse setting name */ + if ( ( rc = parse_setting_name ( lsetting->name, + find_child_settings, &ignore, + &setting ) ) != 0 ) { + DBGC ( settings, "Linux cannot parse %s: %s\n", + lsetting->name, strerror ( rc ) ); + continue; } + + /* Apply default type if not specified */ + if ( ! 
setting.type ) + setting.type = &setting_type_string; + + /* Store setting */ + if ( ( rc = storef_setting ( settings, &setting, + lsetting->value ) ) != 0 ) { + DBGC ( settings, "Linux cannot set %s=\"%s\": %s\n", + lsetting->name, lsetting->value, + strerror ( rc ) ); + continue; + } + + /* Mark setting as applied */ + lsetting->applied = 1; } } diff --git a/src/drivers/linux/slirp.c b/src/drivers/linux/slirp.c new file mode 100644 index 000000000..8341c9676 --- /dev/null +++ b/src/drivers/linux/slirp.c @@ -0,0 +1,552 @@ +/* + * Copyright (C) 2021 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * Linux Slirp network driver + * + */ + +/** Maximum number of open file descriptors */ +#define SLIRP_MAX_FDS 128 + +/** A Slirp network interface */ +struct slirp_nic { + /** The libslirp device object */ + struct Slirp *slirp; + /** Polling file descriptor list */ + struct pollfd pollfds[SLIRP_MAX_FDS]; + /** Number of file descriptors */ + unsigned int numfds; +}; + +/** A Slirp alarm timer */ +struct slirp_alarm { + /** Slirp network interface */ + struct slirp_nic *slirp; + /** Retry timer */ + struct retry_timer timer; + /** Callback function */ + void ( __asmcall * callback ) ( void *opaque ); + /** Opaque value for callback function */ + void *opaque; +}; + +/** Default MAC address */ +static const uint8_t slirp_default_mac[ETH_ALEN] = + { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 }; + +/****************************************************************************** + * + * Slirp interface + * + ****************************************************************************** + */ + +/** + * Send packet + * + * @v buf Data buffer + * @v len Length of data + * @v device Device opaque pointer + * @ret len Consumed length (or negative on error) + */ +static ssize_t __asmcall slirp_send_packet ( const void *buf, size_t len, + void *device ) { + struct net_device *netdev = device; + struct io_buffer *iobuf; + + /* Allocate I/O buffer */ + iobuf = alloc_iob ( len ); + if ( ! 
iobuf ) + return -1; + + /* Populate I/O buffer */ + memcpy ( iob_put ( iobuf, len ), buf, len ); + + /* Hand off to network stack */ + netdev_rx ( netdev, iobuf ); + + return len; +} + +/** + * Print an error message + * + * @v msg Error message + * @v device Device opaque pointer + */ +static void __asmcall slirp_guest_error ( const char *msg, void *device ) { + struct net_device *netdev = device; + struct slirp_nic *slirp = netdev->priv; + + DBGC ( slirp, "SLIRP %p error: %s\n", slirp, msg ); +} + +/** + * Get virtual clock + * + * @v device Device opaque pointer + * @ret clock_ns Clock time in nanoseconds + */ +static int64_t __asmcall slirp_clock_get_ns ( void *device __unused ) { + int64_t time; + + time = currticks(); + return ( time * ( 1000000 / TICKS_PER_MS ) ); +} + +/** + * Handle timer expiry + * + * @v timer Retry timer + * @v over Failure indicator + */ +static void slirp_expired ( struct retry_timer *timer, int over __unused ) { + struct slirp_alarm *alarm = + container_of ( timer, struct slirp_alarm, timer ); + struct slirp_nic *slirp = alarm->slirp; + + /* Notify callback */ + DBGC ( slirp, "SLIRP %p timer fired\n", slirp ); + alarm->callback ( alarm->opaque ); +} + +/** + * Create a new timer + * + * @v callback Timer callback + * @v opaque Timer opaque pointer + * @v device Device opaque pointer + * @ret timer Timer + */ +static void * __asmcall +slirp_timer_new ( void ( __asmcall * callback ) ( void *opaque ), + void *opaque, void *device ) { + struct net_device *netdev = device; + struct slirp_nic *slirp = netdev->priv; + struct slirp_alarm *alarm; + + /* Allocate timer */ + alarm = malloc ( sizeof ( *alarm ) ); + if ( ! alarm ) { + DBGC ( slirp, "SLIRP %p could not allocate timer\n", slirp ); + return NULL; + } + + /* Initialise timer */ + memset ( alarm, 0, sizeof ( *alarm ) ); + alarm->slirp = slirp; + timer_init ( &alarm->timer, slirp_expired, NULL ); + alarm->callback = callback; + alarm->opaque = opaque; + DBGC ( slirp, "SLIRP %p timer %p has callback %p (%p)\n", + slirp, alarm, alarm->callback, alarm->opaque ); + + return alarm; +} + +/** + * Delete a timer + * + * @v timer Timer + * @v device Device opaque pointer + */ +static void __asmcall slirp_timer_free ( void *timer, void *device ) { + struct net_device *netdev = device; + struct slirp_nic *slirp = netdev->priv; + struct slirp_alarm *alarm = timer; + + /* Ignore timers that failed to allocate */ + if ( ! alarm ) + return; + + /* Stop timer */ + stop_timer ( &alarm->timer ); + + /* Free timer */ + free ( alarm ); + DBGC ( slirp, "SLIRP %p timer %p freed\n", slirp, alarm ); +} + +/** + * Set timer expiry time + * + * @v timer Timer + * @v expire Expiry time + * @v device Device opaque pointer + */ +static void __asmcall slirp_timer_mod ( void *timer, int64_t expire, + void *device ) { + struct net_device *netdev = device; + struct slirp_nic *slirp = netdev->priv; + struct slirp_alarm *alarm = timer; + int64_t timeout_ms; + unsigned long timeout; + + /* Ignore timers that failed to allocate */ + if ( ! 
alarm ) + return; + + /* (Re)start timer */ + timeout_ms = ( expire - ( currticks() / TICKS_PER_MS ) ); + if ( timeout_ms < 0 ) + timeout_ms = 0; + timeout = ( timeout_ms * TICKS_PER_MS ); + start_timer_fixed ( &alarm->timer, timeout ); + DBGC ( slirp, "SLIRP %p timer %p set for %ld ticks\n", + slirp, alarm, timeout ); +} + +/** + * Register file descriptor for polling + * + * @v fd File descriptor + * @v device Device opaque pointer + */ +static void __asmcall slirp_register_poll_fd ( int fd, void *device ) { + struct net_device *netdev = device; + struct slirp_nic *slirp = netdev->priv; + + DBGC ( slirp, "SLIRP %p registered FD %d\n", slirp, fd ); +} + +/** + * Unregister file descriptor + * + * @v fd File descriptor + * @v device Device opaque pointer + */ +static void __asmcall slirp_unregister_poll_fd ( int fd, void *device ) { + struct net_device *netdev = device; + struct slirp_nic *slirp = netdev->priv; + + DBGC ( slirp, "SLIRP %p unregistered FD %d\n", slirp, fd ); +} + +/** + * Notify that new events are ready + * + * @v device Device opaque pointer + */ +static void __asmcall slirp_notify ( void *device ) { + struct net_device *netdev = device; + struct slirp_nic *slirp = netdev->priv; + + DBGC2 ( slirp, "SLIRP %p notified\n", slirp ); +} + +/** Slirp callbacks */ +static struct slirp_callbacks slirp_callbacks = { + .send_packet = slirp_send_packet, + .guest_error = slirp_guest_error, + .clock_get_ns = slirp_clock_get_ns, + .timer_new = slirp_timer_new, + .timer_free = slirp_timer_free, + .timer_mod = slirp_timer_mod, + .register_poll_fd = slirp_register_poll_fd, + .unregister_poll_fd = slirp_unregister_poll_fd, + .notify = slirp_notify, +}; + +/****************************************************************************** + * + * Network device interface + * + ****************************************************************************** + */ + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int slirp_open ( struct net_device *netdev ) { + struct slirp_nic *slirp = netdev->priv; + + /* Nothing to do */ + DBGC ( slirp, "SLIRP %p opened\n", slirp ); + + return 0; +} + +/** + * Close network device + * + * @v netdev Network device + */ +static void slirp_close ( struct net_device *netdev ) { + struct slirp_nic *slirp = netdev->priv; + + /* Nothing to do */ + DBGC ( slirp, "SLIRP %p closed\n", slirp ); +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int slirp_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ) { + struct slirp_nic *slirp = netdev->priv; + + /* Transmit packet */ + linux_slirp_input ( slirp->slirp, iobuf->data, iob_len ( iobuf ) ); + netdev_tx_complete ( netdev, iobuf ); + + return 0; +} + +/** + * Add polling file descriptor + * + * @v fd File descriptor + * @v events Events of interest + * @v device Device opaque pointer + * @ret index File descriptor index + */ +static int __asmcall slirp_add_poll ( int fd, int events, void *device ) { + struct net_device *netdev = device; + struct slirp_nic *slirp = netdev->priv; + struct pollfd *pollfd; + unsigned int index; + + /* Fail if too many descriptors are registered */ + if ( slirp->numfds >= SLIRP_MAX_FDS ) { + DBGC ( slirp, "SLIRP %p too many file descriptors\n", slirp ); + return -1; + } + + /* Populate polling file descriptor */ + index = slirp->numfds++; + pollfd = &slirp->pollfds[index]; + pollfd->fd = fd; + pollfd->events = 0; + if ( events & SLIRP_EVENT_IN ) 
+ pollfd->events |= POLLIN; + if ( events & SLIRP_EVENT_OUT ) + pollfd->events |= POLLOUT; + if ( events & SLIRP_EVENT_PRI ) + pollfd->events |= POLLPRI; + if ( events & SLIRP_EVENT_ERR ) + pollfd->events |= POLLERR; + if ( events & SLIRP_EVENT_HUP ) + pollfd->events |= ( POLLHUP | POLLRDHUP ); + DBGCP ( slirp, "SLIRP %p polling FD %d event mask %#04x(%#04x)\n", + slirp, fd, events, pollfd->events ); + + return index; +} + +/** + * Get returned events for a file descriptor + * + * @v index File descriptor index + * @v device Device opaque pointer + * @ret events Returned events + */ +static int __asmcall slirp_get_revents ( int index, void *device ) { + struct net_device *netdev = device; + struct slirp_nic *slirp = netdev->priv; + int revents; + int events; + + /* Ignore failed descriptors */ + if ( index < 0 ) + return 0; + + /* Collect events */ + revents = slirp->pollfds[index].revents; + events = 0; + if ( revents & POLLIN ) + events |= SLIRP_EVENT_IN; + if ( revents & POLLOUT ) + events |= SLIRP_EVENT_OUT; + if ( revents & POLLPRI ) + events |= SLIRP_EVENT_PRI; + if ( revents & POLLERR ) + events |= SLIRP_EVENT_ERR; + if ( revents & ( POLLHUP | POLLRDHUP ) ) + events |= SLIRP_EVENT_HUP; + if ( events ) { + DBGC2 ( slirp, "SLIRP %p polled FD %d events %#04x(%#04x)\n", + slirp, slirp->pollfds[index].fd, events, revents ); + } + + return events; +} + +/** + * Poll for completed and received packets + * + * @v netdev Network device + */ +static void slirp_poll ( struct net_device *netdev ) { + struct slirp_nic *slirp = netdev->priv; + uint32_t timeout = 0; + int ready; + int error; + + /* Rebuild polling file descriptor list */ + slirp->numfds = 0; + linux_slirp_pollfds_fill ( slirp->slirp, &timeout, + slirp_add_poll, netdev ); + + /* Poll descriptors */ + ready = linux_poll ( slirp->pollfds, slirp->numfds, 0 ); + error = ( ready == -1 ); + linux_slirp_pollfds_poll ( slirp->slirp, error, slirp_get_revents, + netdev ); + + /* Record polling errors */ + if ( error ) { + DBGC ( slirp, "SLIRP %p poll failed: %s\n", + slirp, linux_strerror ( linux_errno ) ); + netdev_rx_err ( netdev, NULL, -ELINUX ( linux_errno ) ); + } +} + +/** Network device operations */ +static struct net_device_operations slirp_operations = { + .open = slirp_open, + .close = slirp_close, + .transmit = slirp_transmit, + .poll = slirp_poll, +}; + +/****************************************************************************** + * + * Linux driver interface + * + ****************************************************************************** + */ + +/** + * Probe device + * + * @v linux Linux device + * @v request Device creation request + * @ret rc Return status code + */ +static int slirp_probe ( struct linux_device *linux, + struct linux_device_request *request ) { + struct net_device *netdev; + struct slirp_nic *slirp; + struct slirp_config config; + int rc; + + /* Allocate device */ + netdev = alloc_etherdev ( sizeof ( *slirp ) ); + if ( ! 
netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &slirp_operations ); + linux_set_drvdata ( linux, netdev ); + snprintf ( linux->dev.name, sizeof ( linux->dev.name ), "host" ); + netdev->dev = &linux->dev; + memcpy ( netdev->hw_addr, slirp_default_mac, ETH_ALEN ); + slirp = netdev->priv; + memset ( slirp, 0, sizeof ( *slirp ) ); + + /* Apply requested settings */ + linux_apply_settings ( &request->settings, + netdev_settings ( netdev ) ); + + /* Initialise default configuration (matching qemu) */ + memset ( &config, 0, sizeof ( config ) ); + config.version = 1; + config.in_enabled = true; + config.vnetwork.s_addr = htonl ( 0x0a000200 ); /* 10.0.2.0 */ + config.vnetmask.s_addr = htonl ( 0xffffff00 ); /* 255.255.255.0 */ + config.vhost.s_addr = htonl ( 0x0a000202 ); /* 10.0.2.2 */ + config.in6_enabled = true; + config.vdhcp_start.s_addr = htonl ( 0x0a00020f ); /* 10.0.2.15 */ + config.vnameserver.s_addr = htonl ( 0x0a000203 ); /* 10.0.2.3 */ + + /* Instantiate device */ + slirp->slirp = linux_slirp_new ( &config, &slirp_callbacks, netdev ); + if ( ! slirp->slirp ) { + DBGC ( slirp, "SLIRP could not instantiate\n" ); + rc = -ENODEV; + goto err_new; + } + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register; + + /* Set link up since there is no concept of link state */ + netdev_link_up ( netdev ); + + return 0; + + unregister_netdev ( netdev ); + err_register: + linux_slirp_cleanup ( slirp->slirp ); + err_new: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return rc; +} + +/** + * Remove device + * + * @v linux Linux device + */ +static void slirp_remove ( struct linux_device *linux ) { + struct net_device *netdev = linux_get_drvdata ( linux ); + struct slirp_nic *slirp = netdev->priv; + + /* Unregister network device */ + unregister_netdev ( netdev ); + + /* Shut down device */ + linux_slirp_cleanup ( slirp->slirp ); + + /* Free network device */ + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** Slirp driver */ +struct linux_driver slirp_driver __linux_driver = { + .name = "slirp", + .probe = slirp_probe, + .remove = slirp_remove, + .can_probe = 1, +}; diff --git a/src/drivers/linux/tap.c b/src/drivers/linux/tap.c index db3b7955b..ff1e08bdb 100644 --- a/src/drivers/linux/tap.c +++ b/src/drivers/linux/tap.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/drivers/net/3c90x.c b/src/drivers/net/3c90x.c index 853de2b52..63e07777f 100644 --- a/src/drivers/net/3c90x.c +++ b/src/drivers/net/3c90x.c @@ -249,7 +249,7 @@ static int a3c90x_setup_tx_ring(struct INF_3C90X *p) { DBGP("a3c90x_setup_tx_ring\n"); p->tx_ring = - malloc_dma(TX_RING_SIZE * sizeof(struct TXD), TX_RING_ALIGN); + malloc_phys(TX_RING_SIZE * sizeof(struct TXD), TX_RING_ALIGN); if (!p->tx_ring) { DBG("Could not allocate TX-ring\n"); @@ -304,7 +304,7 @@ static void a3c90x_free_tx_ring(struct INF_3C90X *p) { DBGP("a3c90x_free_tx_ring\n"); - free_dma(p->tx_ring, TX_RING_SIZE * sizeof(struct TXD)); + free_phys(p->tx_ring, TX_RING_SIZE * sizeof(struct TXD)); p->tx_ring = NULL; /* io_buffers are free()ed by netdev_tx_complete[,_err]() */ } @@ -461,7 +461,7 @@ static int a3c90x_setup_rx_ring(struct INF_3C90X *p) DBGP("a3c90x_setup_rx_ring\n"); p->rx_ring = - malloc_dma(RX_RING_SIZE * sizeof(struct RXD), RX_RING_ALIGN); + malloc_phys(RX_RING_SIZE * sizeof(struct RXD), RX_RING_ALIGN); if (!p->rx_ring) { DBG("Could not allocate RX-ring\n"); @@ -491,7 +491,7 @@ static void 
a3c90x_free_rx_ring(struct INF_3C90X *p) { DBGP("a3c90x_free_rx_ring\n"); - free_dma(p->rx_ring, RX_RING_SIZE * sizeof(struct RXD)); + free_phys(p->rx_ring, RX_RING_SIZE * sizeof(struct RXD)); p->rx_ring = NULL; } diff --git a/src/drivers/net/amd8111e.c b/src/drivers/net/amd8111e.c index 693d77d1d..babd12d3c 100644 --- a/src/drivers/net/amd8111e.c +++ b/src/drivers/net/amd8111e.c @@ -664,7 +664,7 @@ static int amd8111e_probe(struct nic *nic, struct pci_device *pdev) memset(lp, 0, sizeof(*lp)); lp->pdev = pdev; lp->nic = nic; - lp->mmio = ioremap(mmio_start, mmio_len); + lp->mmio = pci_ioremap(pdev, mmio_start, mmio_len); lp->opened = 1; adjust_pci_device(pdev); diff --git a/src/drivers/net/ath/ath5k/ath5k.c b/src/drivers/net/ath/ath5k/ath5k.c index a500175a7..e43eb0aaf 100644 --- a/src/drivers/net/ath/ath5k/ath5k.c +++ b/src/drivers/net/ath/ath5k/ath5k.c @@ -280,7 +280,7 @@ static int ath5k_probe(struct pci_device *pdev) */ pci_write_config_byte(pdev, 0x41, 0); - mem = ioremap(pdev->membase, 0x10000); + mem = pci_ioremap(pdev, pdev->membase, 0x10000); if (!mem) { DBG("ath5k: cannot remap PCI memory region\n"); ret = -EIO; @@ -877,7 +877,7 @@ ath5k_desc_alloc(struct ath5k_softc *sc) /* allocate descriptors */ sc->desc_len = sizeof(struct ath5k_desc) * (ATH_TXBUF + ATH_RXBUF + 1); - sc->desc = malloc_dma(sc->desc_len, ATH5K_DESC_ALIGN); + sc->desc = malloc_phys(sc->desc_len, ATH5K_DESC_ALIGN); if (sc->desc == NULL) { DBG("ath5k: can't allocate descriptors\n"); ret = -ENOMEM; @@ -915,7 +915,7 @@ ath5k_desc_alloc(struct ath5k_softc *sc) return 0; err_free: - free_dma(sc->desc, sc->desc_len); + free_phys(sc->desc, sc->desc_len); err: sc->desc = NULL; return ret; @@ -932,7 +932,7 @@ ath5k_desc_free(struct ath5k_softc *sc) ath5k_rxbuf_free(sc, bf); /* Free memory associated with all descriptors */ - free_dma(sc->desc, sc->desc_len); + free_phys(sc->desc, sc->desc_len); free(sc->bufptr); sc->bufptr = NULL; diff --git a/src/drivers/net/ath/ath5k/ath5k_eeprom.c b/src/drivers/net/ath/ath5k/ath5k_eeprom.c index 983d206b7..46f33d1e8 100644 --- a/src/drivers/net/ath/ath5k/ath5k_eeprom.c +++ b/src/drivers/net/ath/ath5k/ath5k_eeprom.c @@ -39,6 +39,9 @@ static int ath5k_hw_eeprom_read(struct ath5k_hw *ah, u32 offset, u16 *data) { u32 status, timeout; + /* Avoid returning uninitialised data on error */ + *data = 0xffff; + /* * Initialize EEPROM access */ @@ -416,6 +419,7 @@ ath5k_eeprom_read_turbo_modes(struct ath5k_hw *ah, if (ee->ee_version < AR5K_EEPROM_VERSION_5_0) return 0; + AR5K_EEPROM_READ(o++, val); switch (mode){ case AR5K_EEPROM_MODE_11A: ee->ee_switch_settling_turbo[mode] = (val >> 6) & 0x7f; diff --git a/src/drivers/net/ath/ath9k/ath9k.c b/src/drivers/net/ath/ath9k/ath9k.c index 183aa65f6..98b7ecd5a 100644 --- a/src/drivers/net/ath/ath9k/ath9k.c +++ b/src/drivers/net/ath/ath9k/ath9k.c @@ -138,7 +138,7 @@ static int ath_pci_probe(struct pci_device *pdev) if ((val & 0x0000ff00) != 0) pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); - mem = ioremap(pdev->membase, 0x10000); + mem = pci_ioremap(pdev, pdev->membase, 0x10000); if (!mem) { DBG("ath9K: PCI memory map error\n") ; ret = -EIO; diff --git a/src/drivers/net/ath/ath9k/ath9k_init.c b/src/drivers/net/ath/ath9k/ath9k_init.c index 98a0d6d59..05ed3336a 100644 --- a/src/drivers/net/ath/ath9k/ath9k_init.c +++ b/src/drivers/net/ath/ath9k/ath9k_init.c @@ -223,7 +223,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, } /* allocate descriptors */ - dd->dd_desc = malloc_dma(dd->dd_desc_len, 16); + dd->dd_desc = 
malloc_phys(dd->dd_desc_len, 16); if (dd->dd_desc == NULL) { error = -ENOMEM; goto fail; @@ -264,7 +264,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, } return 0; fail2: - free_dma(dd->dd_desc, dd->dd_desc_len); + free_phys(dd->dd_desc, dd->dd_desc_len); fail: memset(dd, 0, sizeof(*dd)); return error; @@ -588,7 +588,7 @@ void ath_descdma_cleanup(struct ath_softc *sc __unused, struct ath_descdma *dd, struct list_head *head) { - free_dma(dd->dd_desc, dd->dd_desc_len); + free_phys(dd->dd_desc, dd->dd_desc_len); INIT_LIST_HEAD(head); free(dd->dd_bufptr); diff --git a/src/drivers/net/atl1e.c b/src/drivers/net/atl1e.c index d010d8c4a..0f0df5326 100644 --- a/src/drivers/net/atl1e.c +++ b/src/drivers/net/atl1e.c @@ -370,7 +370,7 @@ static void atl1e_free_ring_resources(struct atl1e_adapter *adapter) atl1e_clean_rx_ring(adapter); if (adapter->ring_vir_addr) { - free_dma(adapter->ring_vir_addr, adapter->ring_size); + free_phys(adapter->ring_vir_addr, adapter->ring_size); adapter->ring_vir_addr = NULL; adapter->ring_dma = 0; } @@ -405,7 +405,7 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter) /* real ring DMA buffer */ size = adapter->ring_size; - adapter->ring_vir_addr = malloc_dma(adapter->ring_size, 32); + adapter->ring_vir_addr = malloc_phys(adapter->ring_size, 32); if (adapter->ring_vir_addr == NULL) { DBG("atl1e: out of memory allocating %d bytes for %s ring\n", diff --git a/src/drivers/net/axge.c b/src/drivers/net/axge.c index ab59a8be7..fb274d24f 100644 --- a/src/drivers/net/axge.c +++ b/src/drivers/net/axge.c @@ -213,6 +213,7 @@ static inline int axge_write_dword ( struct axge_device *axge, static int axge_check_link ( struct axge_device *axge ) { struct net_device *netdev = axge->netdev; uint8_t plsr; + uint16_t msr; int rc; /* Read physical link status register */ @@ -222,12 +223,28 @@ static int axge_check_link ( struct axge_device *axge ) { return rc; } + /* Write medium status register */ + msr = cpu_to_le16 ( AXGE_MSR_FD | AXGE_MSR_RFC | AXGE_MSR_TFC | + AXGE_MSR_RE ); + if ( plsr & AXGE_PLSR_EPHY_1000 ) { + msr |= cpu_to_le16 ( AXGE_MSR_GM ); + } else if ( plsr & AXGE_PLSR_EPHY_100 ) { + msr |= cpu_to_le16 ( AXGE_MSR_PS ); + } + if ( ( rc = axge_write_word ( axge, AXGE_MSR, msr ) ) != 0 ) { + DBGC ( axge, "AXGE %p could not write MSR: %s\n", + axge, strerror ( rc ) ); + return rc; + } + /* Update link status */ if ( plsr & AXGE_PLSR_EPHY_ANY ) { - DBGC ( axge, "AXGE %p link up (PLSR %02x)\n", axge, plsr ); + DBGC ( axge, "AXGE %p link up (PLSR %02x MSR %04x)\n", + axge, plsr, msr ); netdev_link_up ( netdev ); } else { - DBGC ( axge, "AXGE %p link down (PLSR %02x)\n", axge, plsr ); + DBGC ( axge, "AXGE %p link down (PLSR %02x MSR %04x)\n", + axge, plsr, msr ); netdev_link_down ( netdev ); } @@ -291,13 +308,8 @@ static void axge_intr_complete ( struct usb_endpoint *ep, /* Extract link status */ link_ok = ( intr->link & cpu_to_le16 ( AXGE_INTR_LINK_PPLS ) ); - if ( link_ok && ! netdev_link_ok ( netdev ) ) { - DBGC ( axge, "AXGE %p link up\n", axge ); - netdev_link_up ( netdev ); - } else if ( netdev_link_ok ( netdev ) && ! link_ok ) { - DBGC ( axge, "AXGE %p link down\n", axge ); - netdev_link_down ( netdev ); - } + if ( ( !! link_ok ) ^ ( !! 
netdev_link_ok ( netdev ) ) ) + axge->check_link = 1; /* Free I/O buffer */ free_iob ( iobuf ); @@ -519,6 +531,13 @@ static int axge_open ( struct net_device *netdev ) { uint16_t rcr; int rc; + /* Reapply device configuration to avoid transaction errors */ + if ( ( rc = usb_set_configuration ( axge->usb, axge->config ) ) != 0 ) { + DBGC ( axge, "AXGE %p could not set configuration: %s\n", + axge, strerror ( rc ) ); + goto err_set_configuration; + } + /* Open USB network device */ if ( ( rc = usbnet_open ( &axge->usbnet ) ) != 0 ) { DBGC ( axge, "AXGE %p could not open: %s\n", @@ -544,15 +563,18 @@ static int axge_open ( struct net_device *netdev ) { } /* Update link status */ - axge_check_link ( axge ); + if ( ( rc = axge_check_link ( axge ) ) != 0 ) + goto err_check_link; return 0; + err_check_link: axge_write_word ( axge, AXGE_RCR, 0 ); err_write_rcr: err_write_mac: usbnet_close ( &axge->usbnet ); err_open: + err_set_configuration: return rc; } @@ -605,6 +627,15 @@ static void axge_poll ( struct net_device *netdev ) { /* Refill endpoints */ if ( ( rc = usbnet_refill ( &axge->usbnet ) ) != 0 ) netdev_rx_err ( netdev, NULL, rc ); + + /* Update link state, if applicable */ + if ( axge->check_link ) { + if ( ( rc = axge_check_link ( axge ) ) == 0 ) { + axge->check_link = 0; + } else { + netdev_rx_err ( netdev, NULL, rc ); + } + } } /** AXGE network device operations */ @@ -635,7 +666,6 @@ static int axge_probe ( struct usb_function *func, struct net_device *netdev; struct axge_device *axge; uint16_t epprcr; - uint16_t msr; uint8_t csr; int rc; @@ -652,6 +682,7 @@ static int axge_probe ( struct usb_function *func, axge->usb = usb; axge->bus = usb->port->hub->bus; axge->netdev = netdev; + axge->config = config->config; usbnet_init ( &axge->usbnet, func, &axge_intr_operations, &axge_in_operations, &axge_out_operations ); usb_refill_init ( &axge->usbnet.intr, 0, 0, AXGE_INTR_MAX_FILL ); @@ -705,28 +736,20 @@ static int axge_probe ( struct usb_function *func, goto err_write_bicr; } - /* Set medium status */ - msr = cpu_to_le16 ( AXGE_MSR_GM | AXGE_MSR_FD | AXGE_MSR_RFC | - AXGE_MSR_TFC | AXGE_MSR_RE ); - if ( ( rc = axge_write_word ( axge, AXGE_MSR, msr ) ) != 0 ) { - DBGC ( axge, "AXGE %p could not write MSR: %s\n", - axge, strerror ( rc ) ); - goto err_write_msr; - } - /* Register network device */ if ( ( rc = register_netdev ( netdev ) ) != 0 ) goto err_register; /* Update link status */ - axge_check_link ( axge ); + if ( ( rc = axge_check_link ( axge ) ) != 0 ) + goto err_check_link; usb_func_set_drvdata ( func, axge ); return 0; + err_check_link: unregister_netdev ( netdev ); err_register: - err_write_msr: err_write_bicr: err_write_csr: err_write_epprcr_on: diff --git a/src/drivers/net/axge.h b/src/drivers/net/axge.h index 65bf911c5..e22e0ec47 100644 --- a/src/drivers/net/axge.h +++ b/src/drivers/net/axge.h @@ -49,6 +49,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define AXGE_MSR_RFC 0x0010 /**< RX flow control enable */ #define AXGE_MSR_TFC 0x0020 /**< TX flow control enable */ #define AXGE_MSR_RE 0x0100 /**< Receive enable */ +#define AXGE_MSR_PS 0x0200 /**< 100Mbps port speed */ /** Ethernet PHY Power and Reset Control Register */ #define AXGE_EPPRCR 0x26 @@ -144,6 +145,10 @@ struct axge_device { struct net_device *netdev; /** USB network device */ struct usbnet_device usbnet; + /** Device configuration */ + unsigned int config; + /** Link state has changed */ + int check_link; }; /** Interrupt maximum fill level diff --git a/src/drivers/net/b44.c b/src/drivers/net/b44.c index 
d9aeb1b4b..eaf6d35ce 100644 --- a/src/drivers/net/b44.c +++ b/src/drivers/net/b44.c @@ -436,7 +436,7 @@ static void b44_free_rx_ring(struct b44_private *bp) free_iob(bp->rx_iobuf[i]); bp->rx_iobuf[i] = NULL; } - free_dma(bp->rx, B44_RX_RING_LEN_BYTES); + free_phys(bp->rx, B44_RX_RING_LEN_BYTES); bp->rx = NULL; } } @@ -446,11 +446,11 @@ static int b44_init_rx_ring(struct b44_private *bp) { b44_free_rx_ring(bp); - bp->rx = malloc_dma(B44_RX_RING_LEN_BYTES, B44_DMA_ALIGNMENT); + bp->rx = malloc_phys(B44_RX_RING_LEN_BYTES, B44_DMA_ALIGNMENT); if (!bp->rx) return -ENOMEM; if (!b44_address_ok(bp->rx)) { - free_dma(bp->rx, B44_RX_RING_LEN_BYTES); + free_phys(bp->rx, B44_RX_RING_LEN_BYTES); return -ENOTSUP; } @@ -468,7 +468,7 @@ static int b44_init_rx_ring(struct b44_private *bp) static void b44_free_tx_ring(struct b44_private *bp) { if (bp->tx) { - free_dma(bp->tx, B44_TX_RING_LEN_BYTES); + free_phys(bp->tx, B44_TX_RING_LEN_BYTES); bp->tx = NULL; } } @@ -478,11 +478,11 @@ static int b44_init_tx_ring(struct b44_private *bp) { b44_free_tx_ring(bp); - bp->tx = malloc_dma(B44_TX_RING_LEN_BYTES, B44_DMA_ALIGNMENT); + bp->tx = malloc_phys(B44_TX_RING_LEN_BYTES, B44_DMA_ALIGNMENT); if (!bp->tx) return -ENOMEM; if (!b44_address_ok(bp->tx)) { - free_dma(bp->tx, B44_TX_RING_LEN_BYTES); + free_phys(bp->tx, B44_TX_RING_LEN_BYTES); return -ENOTSUP; } @@ -673,7 +673,7 @@ static int b44_probe(struct pci_device *pci) bp->pci = pci; /* Map device registers */ - bp->regs = ioremap(pci->membase, B44_REGS_SIZE); + bp->regs = pci_ioremap(pci, pci->membase, B44_REGS_SIZE); if (!bp->regs) { netdev_put(netdev); return -ENOMEM; diff --git a/src/drivers/net/bnx2.c b/src/drivers/net/bnx2.c index 4ebcc52a9..d5783ff99 100644 --- a/src/drivers/net/bnx2.c +++ b/src/drivers/net/bnx2.c @@ -2152,7 +2152,7 @@ bnx2_init_board(struct pci_device *pdev, struct nic *nic) bnx2reg_base = pci_bar_start(pdev, PCI_BASE_ADDRESS_0); bnx2reg_len = MB_GET_CID_ADDR(17); - bp->regview = ioremap(bnx2reg_base, bnx2reg_len); + bp->regview = pci_ioremap(pdev, bnx2reg_base, bnx2reg_len); if (!bp->regview) { printf("Cannot map register space, aborting.\n"); diff --git a/src/drivers/net/bnxt/bnxt.c b/src/drivers/net/bnxt/bnxt.c new file mode 100644 index 000000000..e3876503f --- /dev/null +++ b/src/drivers/net/bnxt/bnxt.c @@ -0,0 +1,2228 @@ + +FILE_LICENCE ( GPL2_ONLY ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "bnxt.h" +#include "bnxt_dbg.h" + +static void bnxt_service_cq ( struct net_device *dev ); +static void bnxt_tx_complete ( struct net_device *dev, u16 hw_idx ); +static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt ); +static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt ); +static int bnxt_rx_complete ( struct net_device *dev, struct rx_pkt_cmpl *rx ); +void bnxt_link_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl *evt ); + +static struct pci_device_id bnxt_nics[] = { + PCI_ROM( 0x14e4, 0x16c0, "14e4-16C0", "14e4-16C0", 0 ), + PCI_ROM( 0x14e4, 0x16c1, "14e4-16C1", "14e4-16C1", BNXT_FLAG_PCI_VF ), + PCI_ROM( 0x14e4, 0x16c8, "14e4-16C8", "14e4-16C8", 0 ), + PCI_ROM( 0x14e4, 0x16c9, "14e4-16C9", "14e4-16C9", 0 ), + PCI_ROM( 0x14e4, 0x16ca, "14e4-16CA", "14e4-16CA", 0 ), + PCI_ROM( 0x14e4, 0x16cc, "14e4-16CC", "14e4-16CC", 0 ), + PCI_ROM( 0x14e4, 0x16cd, "14e4-16CD", "14e4-16CD", 0 ), + PCI_ROM( 0x14e4, 0x16ce, "14e4-16CE", "14e4-16CE", 0 ), + PCI_ROM( 0x14e4, 0x16cf, "14e4-16CF", "14e4-16CF", 0 ), + PCI_ROM( 0x14e4, 0x16d0, "14e4-16D0", 
"14e4-16D0", 0 ), + PCI_ROM( 0x14e4, 0x16d1, "14e4-16D1", "14e4-16D1", 0 ), + PCI_ROM( 0x14e4, 0x16d2, "14e4-16D2", "14e4-16D2", 0 ), + PCI_ROM( 0x14e4, 0x16d4, "14e4-16D4", "14e4-16D4", 0 ), + PCI_ROM( 0x14e4, 0x16d5, "14e4-16D5", "14e4-16D5", 0 ), + PCI_ROM( 0x14e4, 0x16d6, "14e4-16D6", "14e4-16D6", 0 ), + PCI_ROM( 0x14e4, 0x16d7, "14e4-16D7", "14e4-16D7", 0 ), + PCI_ROM( 0x14e4, 0x16d8, "14e4-16D8", "14e4-16D8", 0 ), + PCI_ROM( 0x14e4, 0x16d9, "14e4-16D9", "14e4-16D9", 0 ), + PCI_ROM( 0x14e4, 0x16da, "14e4-16DA", "14e4-16DA", 0 ), + PCI_ROM( 0x14e4, 0x16db, "14e4-16DB", "14e4-16DB", 0 ), + PCI_ROM( 0x14e4, 0x16dc, "14e4-16DC", "14e4-16DC", BNXT_FLAG_PCI_VF ), + PCI_ROM( 0x14e4, 0x16de, "14e4-16DE", "14e4-16DE", 0 ), + PCI_ROM( 0x14e4, 0x16df, "14e4-16DF", "14e4-16DF", 0 ), + PCI_ROM( 0x14e4, 0x16e0, "14e4-16E0", "14e4-16E0", 0 ), + PCI_ROM( 0x14e4, 0x16e2, "14e4-16E2", "14e4-16E2", 0 ), + PCI_ROM( 0x14e4, 0x16e3, "14e4-16E3", "14e4-16E3", 0 ), + PCI_ROM( 0x14e4, 0x16e4, "14e4-16E4", "14e4-16E4", 0 ), + PCI_ROM( 0x14e4, 0x16e7, "14e4-16E7", "14e4-16E7", 0 ), + PCI_ROM( 0x14e4, 0x16e8, "14e4-16E8", "14e4-16E8", 0 ), + PCI_ROM( 0x14e4, 0x16e9, "14e4-16E9", "14e4-16E9", 0 ), + PCI_ROM( 0x14e4, 0x16ea, "14e4-16EA", "14e4-16EA", 0 ), + PCI_ROM( 0x14e4, 0x16eb, "14e4-16EB", "14e4-16EB", 0 ), + PCI_ROM( 0x14e4, 0x16ec, "14e4-16EC", "14e4-16EC", 0 ), + PCI_ROM( 0x14e4, 0x16ed, "14e4-16ED", "14e4-16ED", 0 ), + PCI_ROM( 0x14e4, 0x16ee, "14e4-16EE", "14e4-16EE", 0 ), + PCI_ROM( 0x14e4, 0x16ef, "14e4-16EF", "14e4-16EF", 0 ), + PCI_ROM( 0x14e4, 0x16f0, "14e4-16F0", "14e4-16F0", 0 ), + PCI_ROM( 0x14e4, 0x16f1, "14e4-16F1", "14e4-16F1", 0 ), + PCI_ROM( 0x14e4, 0x1604, "14e4-1604", "14e4-1604", 0 ), + PCI_ROM( 0x14e4, 0x1605, "14e4-1605", "14e4-1605", 0 ), + PCI_ROM( 0x14e4, 0x1606, "14e4-1606", "14e4-1606", 0 ), + PCI_ROM( 0x14e4, 0x1609, "14e4-1609", "14e4-1609", 0 ), + PCI_ROM( 0x14e4, 0x1614, "14e4-1614", "14e4-1614", 0 ), + PCI_ROM( 0x14e4, 0xd802, "14e4-D802", "14e4-D802", 0 ), + PCI_ROM( 0x14e4, 0xd804, "14e4-D804", "14e4-D804", 0 ), + PCI_ROM( 0x14e4, 0x1750, "14e4-1750", "14e4-1750", 0 ), + PCI_ROM( 0x14e4, 0x1802, "14e4-1802", "14e4-1802", 0 ), + PCI_ROM( 0x14e4, 0x1805, "14e4-1805", "14e4-1805", 0 ), + PCI_ROM( 0x14e4, 0x1751, "14e4-1751", "14e4-1751", 0 ), + PCI_ROM( 0x14e4, 0x1801, "14e4-1801", "14e4-1801", 0 ), + PCI_ROM( 0x14e4, 0x1804, "14e4-1804", "14e4-1804", 0 ), + PCI_ROM( 0x14e4, 0x1752, "14e4-1752", "14e4-1752", 0 ), + PCI_ROM( 0x14e4, 0x1800, "14e4-1800", "14e4-1800", 0 ), + PCI_ROM( 0x14e4, 0x1803, "14e4-1803", "14e4-1803", 0 ), + PCI_ROM( 0x14e4, 0x1806, "14e4-1806", "14e4-1806", BNXT_FLAG_PCI_VF ), + PCI_ROM( 0x14e4, 0x1807, "14e4-1807", "14e4-1807", BNXT_FLAG_PCI_VF ), + PCI_ROM( 0x14e4, 0x1808, "14e4-1808", "14e4-1808", BNXT_FLAG_PCI_VF ), + PCI_ROM( 0x14e4, 0x1809, "14e4-1809", "14e4-1809", BNXT_FLAG_PCI_VF ), +}; + +/** + * Check if Virtual Function + */ +u8 bnxt_is_pci_vf ( struct pci_device *pdev ) +{ + if ( FLAG_TEST ( pdev->id->driver_data, BNXT_FLAG_PCI_VF ) ) { + return 1; + } + return 0; +} + +static void bnxt_down_pci ( struct bnxt *bp ) +{ + DBGP ( "%s\n", __func__ ); + if ( bp->bar2 ) { + iounmap ( bp->bar2 ); + bp->bar2 = NULL; + } + if ( bp->bar1 ) { + iounmap ( bp->bar1 ); + bp->bar1 = NULL; + } + if ( bp->bar0 ) { + iounmap ( bp->bar0 ); + bp->bar0 = NULL; + } +} + +static void *bnxt_pci_base ( struct pci_device *pdev, unsigned int reg ) +{ + unsigned long reg_base, reg_size; + + reg_base = pci_bar_start ( pdev, reg ); + reg_size = pci_bar_size ( pdev, reg ); 
+ return pci_ioremap ( pdev, reg_base, reg_size ); +} + +static int bnxt_get_pci_info ( struct bnxt *bp ) +{ + u16 cmd_reg = 0; + + DBGP ( "%s\n", __func__ ); + /* Disable Interrupt */ + pci_read_word16 ( bp->pdev, PCI_COMMAND, &bp->cmd_reg ); + cmd_reg = bp->cmd_reg | PCI_COMMAND_INTX_DISABLE; + pci_write_word ( bp->pdev, PCI_COMMAND, cmd_reg ); + pci_read_word16 ( bp->pdev, PCI_COMMAND, &cmd_reg ); + + /* SSVID */ + pci_read_word16 ( bp->pdev, + PCI_SUBSYSTEM_VENDOR_ID, + &bp->subsystem_vendor ); + + /* SSDID */ + pci_read_word16 ( bp->pdev, + PCI_SUBSYSTEM_ID, + &bp->subsystem_device ); + + /* Function Number */ + pci_read_byte ( bp->pdev, + PCICFG_ME_REGISTER, + &bp->pf_num ); + + /* Get Bar Address */ + bp->bar0 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_0 ); + bp->bar1 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_2 ); + bp->bar2 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_4 ); + + /* Virtual function */ + bp->vf = bnxt_is_pci_vf ( bp->pdev ); + + dbg_pci ( bp, __func__, cmd_reg ); + return STATUS_SUCCESS; +} + +static int bnxt_get_device_address ( struct bnxt *bp ) +{ + struct net_device *dev = bp->dev; + + DBGP ( "%s\n", __func__ ); + memcpy ( &dev->hw_addr[0], ( char * )&bp->mac_addr[0], ETH_ALEN ); + if ( !is_valid_ether_addr ( &dev->hw_addr[0] ) ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return -EINVAL; + } + + return STATUS_SUCCESS; +} + +static void bnxt_set_link ( struct bnxt *bp ) +{ + if ( bp->link_status == STATUS_LINK_ACTIVE ) + netdev_link_up ( bp->dev ); + else + netdev_link_down ( bp->dev ); +} + +static void thor_db ( struct bnxt *bp, u32 idx, u32 xid, u32 flag ) +{ + void *off; + u64 val; + + if ( bp->vf ) + off = ( void * ) ( bp->bar1 + DB_OFFSET_VF ); + else + off = ( void * ) ( bp->bar1 + DB_OFFSET_PF ); + + val = ( ( u64 )DBC_MSG_XID ( xid, flag ) << 32 ) | + ( u64 )DBC_MSG_IDX ( idx ); + write64 ( val, off ); +} + +static void bnxt_db_nq ( struct bnxt *bp ) +{ + if ( bp->thor ) + thor_db ( bp, ( u32 )bp->nq.cons_id, + ( u32 )bp->nq_ring_id, DBC_DBC_TYPE_NQ_ARM ); + else + write32 ( CMPL_DOORBELL_KEY_CMPL, ( bp->bar1 + 0 ) ); +} + +static void bnxt_db_cq ( struct bnxt *bp ) +{ + if ( bp->thor ) + thor_db ( bp, ( u32 )bp->cq.cons_id, + ( u32 )bp->cq_ring_id, DBC_DBC_TYPE_CQ_ARMALL ); + else + write32 ( CQ_DOORBELL_KEY_IDX ( bp->cq.cons_id ), + ( bp->bar1 + 0 ) ); +} + +static void bnxt_db_rx ( struct bnxt *bp, u32 idx ) +{ + if ( bp->thor ) + thor_db ( bp, idx, ( u32 )bp->rx_ring_id, DBC_DBC_TYPE_SRQ ); + else + write32 ( RX_DOORBELL_KEY_RX | idx, ( bp->bar1 + 0 ) ); +} + +static void bnxt_db_tx ( struct bnxt *bp, u32 idx ) +{ + if ( bp->thor ) + thor_db ( bp, idx, ( u32 )bp->tx_ring_id, DBC_DBC_TYPE_SQ ); + else + write32 ( ( u32 ) ( TX_DOORBELL_KEY_TX | idx ), + ( bp->bar1 + 0 ) ); +} + +void bnxt_add_vlan ( struct io_buffer *iob, u16 vlan ) +{ + char *src = ( char * )iob->data; + u16 len = iob_len ( iob ); + + memmove ( ( char * )&src[MAC_HDR_SIZE + VLAN_HDR_SIZE], + ( char * )&src[MAC_HDR_SIZE], + ( len - MAC_HDR_SIZE ) ); + + * ( u16 * ) ( &src[MAC_HDR_SIZE] ) = BYTE_SWAP_S ( ETHERTYPE_VLAN ); + * ( u16 * ) ( &src[MAC_HDR_SIZE + 2] ) = BYTE_SWAP_S ( vlan ); + iob_put ( iob, VLAN_HDR_SIZE ); +} + +static u16 bnxt_get_pkt_vlan ( char *src ) +{ + if ( * ( ( u16 * )&src[MAC_HDR_SIZE] ) == BYTE_SWAP_S ( ETHERTYPE_VLAN ) ) + return BYTE_SWAP_S ( * ( ( u16 * )&src[MAC_HDR_SIZE + 2] ) ); + return 0; +} + +int bnxt_vlan_drop ( struct bnxt *bp, u16 rx_vlan ) +{ + if ( rx_vlan ) { + if ( bp->vlan_tx ) { + if ( rx_vlan == bp->vlan_tx ) + return 0; + } else { + if 
( rx_vlan == bp->vlan_id ) + return 0; + if ( rx_vlan && !bp->vlan_id ) + return 0; + } + } else { + if ( !bp->vlan_tx && !bp->vlan_id ) + return 0; + } + + return 1; +} + +static inline u32 bnxt_tx_avail ( struct bnxt *bp ) +{ + u32 avail; + u32 use; + + barrier ( ); + avail = TX_AVAIL ( bp->tx.ring_cnt ); + use = TX_IN_USE ( bp->tx.prod_id, bp->tx.cons_id, bp->tx.ring_cnt ); + dbg_tx_avail ( bp, avail, use ); + return ( avail-use ); +} + +void bnxt_set_txq ( struct bnxt *bp, int entry, dma_addr_t mapping, int len ) +{ + struct tx_bd_short *prod_bd; + + prod_bd = ( struct tx_bd_short * )BD_NOW ( bp->tx.bd_virt, + entry, sizeof ( struct tx_bd_short ) ); + if ( len < 512 ) + prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_LT512; + else if ( len < 1024 ) + prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_LT1K; + else if ( len < 2048 ) + prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_LT2K; + else + prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_GTE2K; + prod_bd->flags_type |= TX_BD_FLAGS; + prod_bd->dma.addr = mapping; + prod_bd->len = len; + prod_bd->opaque = ( u32 )entry; +} + +static void bnxt_tx_complete ( struct net_device *dev, u16 hw_idx ) +{ + struct bnxt *bp = netdev_priv ( dev ); + struct io_buffer *iob; + + iob = bp->tx.iob[hw_idx]; + dbg_tx_done ( iob->data, iob_len ( iob ), hw_idx ); + netdev_tx_complete ( dev, iob ); + bp->tx.cons_id = NEXT_IDX ( hw_idx, bp->tx.ring_cnt ); + bp->tx.cnt++; + dump_tx_stat ( bp ); +} + +int bnxt_free_rx_iob ( struct bnxt *bp ) +{ + unsigned int i; + + DBGP ( "%s\n", __func__ ); + if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RX_IOB ) ) ) + return STATUS_SUCCESS; + + for ( i = 0; i < bp->rx.buf_cnt; i++ ) { + if ( bp->rx.iob[i] ) { + free_iob ( bp->rx.iob[i] ); + bp->rx.iob[i] = NULL; + } + } + bp->rx.iob_cnt = 0; + + FLAG_RESET ( bp->flag_hwrm, VALID_RX_IOB ); + return STATUS_SUCCESS; +} + +static void bnxt_set_rx_desc ( u8 *buf, struct io_buffer *iob, + u16 cid, u32 idx ) +{ + struct rx_prod_pkt_bd *desc; + u16 off = cid * sizeof ( struct rx_prod_pkt_bd ); + + desc = ( struct rx_prod_pkt_bd * )&buf[off]; + desc->flags_type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT; + desc->len = MAX_ETHERNET_PACKET_BUFFER_SIZE; + desc->opaque = idx; + desc->dma.addr = virt_to_bus ( iob->data ); +} + +static int bnxt_alloc_rx_iob ( struct bnxt *bp, u16 cons_id, u16 iob_idx ) +{ + struct io_buffer *iob; + + iob = alloc_iob ( BNXT_RX_STD_DMA_SZ ); + if ( !iob ) { + DBGP ( "- %s ( ): alloc_iob Failed\n", __func__ ); + return -ENOMEM; + } + + dbg_alloc_rx_iob ( iob, iob_idx, cons_id ); + bnxt_set_rx_desc ( ( u8 * )bp->rx.bd_virt, iob, cons_id, + ( u32 ) iob_idx ); + bp->rx.iob[iob_idx] = iob; + return 0; +} + +int bnxt_post_rx_buffers ( struct bnxt *bp ) +{ + u16 cons_id = ( bp->rx.cons_id % bp->rx.ring_cnt ); + u16 iob_idx; + + while ( bp->rx.iob_cnt < bp->rx.buf_cnt ) { + iob_idx = ( cons_id % bp->rx.buf_cnt ); + if ( !bp->rx.iob[iob_idx] ) { + if ( bnxt_alloc_rx_iob ( bp, cons_id, iob_idx ) < 0 ) { + dbg_alloc_rx_iob_fail ( iob_idx, cons_id ); + break; + } + } + cons_id = NEXT_IDX ( cons_id, bp->rx.ring_cnt ); + bp->rx.iob_cnt++; + } + + if ( cons_id != bp->rx.cons_id ) { + dbg_rx_cid ( bp->rx.cons_id, cons_id ); + bp->rx.cons_id = cons_id; + bnxt_db_rx ( bp, ( u32 )cons_id ); + } + + FLAG_SET ( bp->flag_hwrm, VALID_RX_IOB ); + return STATUS_SUCCESS; +} + +u8 bnxt_rx_drop ( struct bnxt *bp, struct io_buffer *iob, + struct rx_pkt_cmpl_hi *rx_cmp_hi, u16 rx_len ) +{ + u8 *rx_buf = ( u8 * )iob->data; + u16 err_flags, rx_vlan; + u8 ignore_chksum_err = 0; + int i; + + err_flags = 
rx_cmp_hi->errors_v2 >> RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT; + if ( rx_cmp_hi->errors_v2 == 0x20 || rx_cmp_hi->errors_v2 == 0x21 ) + ignore_chksum_err = 1; + + if ( err_flags && !ignore_chksum_err ) { + bp->rx.drop_err++; + return 1; + } + + for ( i = 0; i < 6; i++ ) { + if ( rx_buf[6 + i] != bp->mac_addr[i] ) + break; + } + + /* Drop the loopback packets */ + if ( i == 6 ) { + bp->rx.drop_lb++; + return 2; + } + + /* Get VLAN ID from RX completion ring */ + if ( rx_cmp_hi->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN ) + rx_vlan = ( rx_cmp_hi->metadata & + RX_PKT_CMPL_METADATA_VID_MASK ); + else + rx_vlan = 0; + + dbg_rx_vlan ( bp, rx_cmp_hi->metadata, rx_cmp_hi->flags2, rx_vlan ); + if ( bnxt_vlan_drop ( bp, rx_vlan ) ) { + bp->rx.drop_vlan++; + return 3; + } + iob_put ( iob, rx_len ); + + if ( rx_vlan ) + bnxt_add_vlan ( iob, rx_vlan ); + + bp->rx.good++; + return 0; +} + +static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt ) +{ + u16 cons_id; + + cons_id = bp->cq.cons_id + cnt; + if ( cons_id >= MAX_CQ_DESC_CNT ) { + /* Toggle completion bit when the ring wraps. */ + bp->cq.completion_bit ^= 1; + cons_id = cons_id - MAX_CQ_DESC_CNT; + } + bp->cq.cons_id = cons_id; +} + +void bnxt_rx_process ( struct net_device *dev, struct bnxt *bp, + struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi ) +{ + u32 desc_idx = rx_cmp->opaque; + struct io_buffer *iob = bp->rx.iob[desc_idx]; + u8 drop; + + dump_rx_bd ( rx_cmp, rx_cmp_hi, desc_idx ); + assert ( !iob ); + drop = bnxt_rx_drop ( bp, iob, rx_cmp_hi, rx_cmp->len ); + dbg_rxp ( iob->data, rx_cmp->len, drop ); + if ( drop ) + netdev_rx_err ( dev, iob, -EINVAL ); + else + netdev_rx ( dev, iob ); + + bp->rx.cnt++; + bp->rx.iob[desc_idx] = NULL; + bp->rx.iob_cnt--; + bnxt_post_rx_buffers ( bp ); + bnxt_adv_cq_index ( bp, 2 ); /* Rx completion is 2 entries. */ + dbg_rx_stat ( bp ); +} + +static int bnxt_rx_complete ( struct net_device *dev, + struct rx_pkt_cmpl *rx_cmp ) +{ + struct bnxt *bp = netdev_priv ( dev ); + struct rx_pkt_cmpl_hi *rx_cmp_hi; + u8 cmpl_bit = bp->cq.completion_bit; + + if ( bp->cq.cons_id == ( bp->cq.ring_cnt - 1 ) ) { + rx_cmp_hi = ( struct rx_pkt_cmpl_hi * )bp->cq.bd_virt; + cmpl_bit ^= 0x1; /* Ring has wrapped. */ + } else + rx_cmp_hi = ( struct rx_pkt_cmpl_hi * ) ( rx_cmp+1 ); + + if ( ! 
( ( rx_cmp_hi->errors_v2 & RX_PKT_CMPL_V2 ) ^ cmpl_bit ) ) { + bnxt_rx_process ( dev, bp, rx_cmp, rx_cmp_hi ); + return SERVICE_NEXT_CQ_BD; + } else + return NO_MORE_CQ_BD_TO_SERVICE; +} + +void bnxt_mm_init ( struct bnxt *bp, const char *func ) +{ + DBGP ( "%s\n", __func__ ); + memset ( bp->hwrm_addr_req, 0, REQ_BUFFER_SIZE ); + memset ( bp->hwrm_addr_resp, 0, RESP_BUFFER_SIZE ); + memset ( bp->hwrm_addr_dma, 0, DMA_BUFFER_SIZE ); + bp->req_addr_mapping = virt_to_bus ( bp->hwrm_addr_req ); + bp->resp_addr_mapping = virt_to_bus ( bp->hwrm_addr_resp ); + bp->dma_addr_mapping = virt_to_bus ( bp->hwrm_addr_dma ); + bp->link_status = STATUS_LINK_DOWN; + bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT; + bp->mtu = MAX_ETHERNET_PACKET_BUFFER_SIZE; + bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; + bp->nq.ring_cnt = MAX_NQ_DESC_CNT; + bp->cq.ring_cnt = MAX_CQ_DESC_CNT; + bp->tx.ring_cnt = MAX_TX_DESC_CNT; + bp->rx.ring_cnt = MAX_RX_DESC_CNT; + bp->rx.buf_cnt = NUM_RX_BUFFERS; + dbg_mem ( bp, func ); +} + +void bnxt_mm_nic ( struct bnxt *bp ) +{ + DBGP ( "%s\n", __func__ ); + memset ( bp->cq.bd_virt, 0, CQ_RING_BUFFER_SIZE ); + memset ( bp->tx.bd_virt, 0, TX_RING_BUFFER_SIZE ); + memset ( bp->rx.bd_virt, 0, RX_RING_BUFFER_SIZE ); + memset ( bp->nq.bd_virt, 0, NQ_RING_BUFFER_SIZE ); + bp->nq.cons_id = 0; + bp->nq.completion_bit = 0x1; + bp->cq.cons_id = 0; + bp->cq.completion_bit = 0x1; + bp->tx.prod_id = 0; + bp->tx.cons_id = 0; + bp->rx.cons_id = 0; + bp->rx.iob_cnt = 0; + + bp->link_status = STATUS_LINK_DOWN; + bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT; + bp->mtu = MAX_ETHERNET_PACKET_BUFFER_SIZE; + bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; + bp->nq.ring_cnt = MAX_NQ_DESC_CNT; + bp->cq.ring_cnt = MAX_CQ_DESC_CNT; + bp->tx.ring_cnt = MAX_TX_DESC_CNT; + bp->rx.ring_cnt = MAX_RX_DESC_CNT; + bp->rx.buf_cnt = NUM_RX_BUFFERS; +} + +void bnxt_free_mem ( struct bnxt *bp ) +{ + DBGP ( "%s\n", __func__ ); + if ( bp->nq.bd_virt ) { + free_phys ( bp->nq.bd_virt, NQ_RING_BUFFER_SIZE ); + bp->nq.bd_virt = NULL; + } + + if ( bp->cq.bd_virt ) { + free_phys ( bp->cq.bd_virt, CQ_RING_BUFFER_SIZE ); + bp->cq.bd_virt = NULL; + } + + if ( bp->rx.bd_virt ) { + free_phys ( bp->rx.bd_virt, RX_RING_BUFFER_SIZE ); + bp->rx.bd_virt = NULL; + } + + if ( bp->tx.bd_virt ) { + free_phys ( bp->tx.bd_virt, TX_RING_BUFFER_SIZE ); + bp->tx.bd_virt = NULL; + } + + if ( bp->hwrm_addr_dma ) { + free_phys ( bp->hwrm_addr_dma, DMA_BUFFER_SIZE ); + bp->dma_addr_mapping = 0; + bp->hwrm_addr_dma = NULL; + } + + if ( bp->hwrm_addr_resp ) { + free_phys ( bp->hwrm_addr_resp, RESP_BUFFER_SIZE ); + bp->resp_addr_mapping = 0; + bp->hwrm_addr_resp = NULL; + } + + if ( bp->hwrm_addr_req ) { + free_phys ( bp->hwrm_addr_req, REQ_BUFFER_SIZE ); + bp->req_addr_mapping = 0; + bp->hwrm_addr_req = NULL; + } + DBGP ( "- %s ( ): - Done\n", __func__ ); +} + +int bnxt_alloc_mem ( struct bnxt *bp ) +{ + DBGP ( "%s\n", __func__ ); + bp->hwrm_addr_req = malloc_phys ( REQ_BUFFER_SIZE, BNXT_DMA_ALIGNMENT ); + bp->hwrm_addr_resp = malloc_phys ( RESP_BUFFER_SIZE, + BNXT_DMA_ALIGNMENT ); + bp->hwrm_addr_dma = malloc_phys ( DMA_BUFFER_SIZE, BNXT_DMA_ALIGNMENT ); + bp->tx.bd_virt = malloc_phys ( TX_RING_BUFFER_SIZE, DMA_ALIGN_4K ); + bp->rx.bd_virt = malloc_phys ( RX_RING_BUFFER_SIZE, DMA_ALIGN_4K ); + bp->cq.bd_virt = malloc_phys ( CQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT ); + bp->nq.bd_virt = malloc_phys ( NQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT ); + test_if ( bp->hwrm_addr_req && + bp->hwrm_addr_resp && + bp->hwrm_addr_dma && + bp->tx.bd_virt && + bp->rx.bd_virt && + 
bp->nq.bd_virt && + bp->cq.bd_virt ) { + bnxt_mm_init ( bp, __func__ ); + return STATUS_SUCCESS; + } + + DBGP ( "- %s ( ): Failed\n", __func__ ); + bnxt_free_mem ( bp ); + return -ENOMEM; +} + +static void hwrm_init ( struct bnxt *bp, struct input *req, u16 cmd, u16 len ) +{ + memset ( req, 0, len ); + req->req_type = cmd; + req->cmpl_ring = ( u16 )HWRM_NA_SIGNATURE; + req->target_id = ( u16 )HWRM_NA_SIGNATURE; + req->resp_addr = bp->resp_addr_mapping; + req->seq_id = bp->seq_id++; +} + +static void hwrm_write_req ( struct bnxt *bp, void *req, u32 cnt ) +{ + u32 i = 0; + + for ( i = 0; i < cnt; i++ ) { + write32 ( ( ( u32 * )req )[i], + ( bp->bar0 + GRC_COM_CHAN_BASE + ( i * 4 ) ) ); + } + write32 ( 0x1, ( bp->bar0 + GRC_COM_CHAN_BASE + GRC_COM_CHAN_TRIG ) ); +} + +static void short_hwrm_cmd_req ( struct bnxt *bp, u16 len ) +{ + struct hwrm_short_input sreq; + + memset ( &sreq, 0, sizeof ( struct hwrm_short_input ) ); + sreq.req_type = ( u16 ) ( ( struct input * )bp->hwrm_addr_req )->req_type; + sreq.signature = SHORT_REQ_SIGNATURE_SHORT_CMD; + sreq.size = len; + sreq.req_addr = bp->req_addr_mapping; + mdelay ( 100 ); + dbg_short_cmd ( ( u8 * )&sreq, __func__, + sizeof ( struct hwrm_short_input ) ); + hwrm_write_req ( bp, &sreq, sizeof ( struct hwrm_short_input ) / 4 ); +} + +static int wait_resp ( struct bnxt *bp, u32 tmo, u16 len, const char *func ) +{ + struct input *req = ( struct input * )bp->hwrm_addr_req; + struct output *resp = ( struct output * )bp->hwrm_addr_resp; + u8 *ptr = ( u8 * )resp; + u32 idx; + u32 wait_cnt = HWRM_CMD_DEFAULT_MULTIPLAYER ( ( u32 )tmo ); + u16 resp_len = 0; + u16 ret = STATUS_TIMEOUT; + + if ( len > bp->hwrm_max_req_len ) + short_hwrm_cmd_req ( bp, len ); + else + hwrm_write_req ( bp, req, ( u32 ) ( len / 4 ) ); + + for ( idx = 0; idx < wait_cnt; idx++ ) { + resp_len = resp->resp_len; + test_if ( resp->seq_id == req->seq_id && + resp->req_type == req->req_type && + ptr[resp_len - 1] == 1 ) { + bp->last_resp_code = resp->error_code; + ret = resp->error_code; + break; + } + udelay ( HWRM_CMD_POLL_WAIT_TIME ); + } + dbg_hw_cmd ( bp, func, len, resp_len, tmo, ret ); + return ( int )ret; +} + +static int bnxt_hwrm_ver_get ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_ver_get_input ); + struct hwrm_ver_get_input *req; + struct hwrm_ver_get_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_ver_get_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_ver_get_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VER_GET, cmd_len ); + req->hwrm_intf_maj = HWRM_VERSION_MAJOR; + req->hwrm_intf_min = HWRM_VERSION_MINOR; + req->hwrm_intf_upd = HWRM_VERSION_UPDATE; + rc = wait_resp ( bp, HWRM_CMD_DEFAULT_TIMEOUT, cmd_len, __func__ ); + if ( rc ) + return STATUS_FAILURE; + + bp->hwrm_spec_code = + resp->hwrm_intf_maj_8b << 16 | + resp->hwrm_intf_min_8b << 8 | + resp->hwrm_intf_upd_8b; + bp->hwrm_cmd_timeout = ( u32 )resp->def_req_timeout; + if ( !bp->hwrm_cmd_timeout ) + bp->hwrm_cmd_timeout = ( u32 )HWRM_CMD_DEFAULT_TIMEOUT; + if ( resp->hwrm_intf_maj_8b >= 1 ) + bp->hwrm_max_req_len = resp->max_req_win_len; + bp->chip_id = + resp->chip_rev << 24 | + resp->chip_metal << 16 | + resp->chip_bond_id << 8 | + resp->chip_platform_type; + bp->chip_num = resp->chip_num; + test_if ( ( resp->dev_caps_cfg & SHORT_CMD_SUPPORTED ) && + ( resp->dev_caps_cfg & SHORT_CMD_REQUIRED ) ) + FLAG_SET ( bp->flags, BNXT_FLAG_HWRM_SHORT_CMD_SUPP ); + bp->hwrm_max_ext_req_len = resp->max_ext_req_len; + if ( bp->chip_num == 
CHIP_NUM_57500 ) + bp->thor = 1; + dbg_fw_ver ( resp, bp->hwrm_cmd_timeout ); + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_func_resource_qcaps ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_resource_qcaps_input ); + struct hwrm_func_resource_qcaps_input *req; + struct hwrm_func_resource_qcaps_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_func_resource_qcaps_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_func_resource_qcaps_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_RESOURCE_QCAPS, + cmd_len ); + req->fid = ( u16 )HWRM_NA_SIGNATURE; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc != STATUS_SUCCESS ) + return STATUS_SUCCESS; + + FLAG_SET ( bp->flags, BNXT_FLAG_RESOURCE_QCAPS_SUPPORT ); + + // VFs + if ( !bp->vf ) { + bp->max_vfs = resp->max_vfs; + bp->vf_res_strategy = resp->vf_reservation_strategy; + } + + // vNICs + bp->min_vnics = resp->min_vnics; + bp->max_vnics = resp->max_vnics; + + // MSI-X + bp->max_msix = resp->max_msix; + + // Ring Groups + bp->min_hw_ring_grps = resp->min_hw_ring_grps; + bp->max_hw_ring_grps = resp->max_hw_ring_grps; + + // TX Rings + bp->min_tx_rings = resp->min_tx_rings; + bp->max_tx_rings = resp->max_tx_rings; + + // RX Rings + bp->min_rx_rings = resp->min_rx_rings; + bp->max_rx_rings = resp->max_rx_rings; + + // Completion Rings + bp->min_cp_rings = resp->min_cmpl_rings; + bp->max_cp_rings = resp->max_cmpl_rings; + + // RSS Contexts + bp->min_rsscos_ctxs = resp->min_rsscos_ctx; + bp->max_rsscos_ctxs = resp->max_rsscos_ctx; + + // L2 Contexts + bp->min_l2_ctxs = resp->min_l2_ctxs; + bp->max_l2_ctxs = resp->max_l2_ctxs; + + // Statistic Contexts + bp->min_stat_ctxs = resp->min_stat_ctx; + bp->max_stat_ctxs = resp->max_stat_ctx; + dbg_func_resource_qcaps ( bp ); + return STATUS_SUCCESS; +} + +static u32 bnxt_set_ring_info ( struct bnxt *bp ) +{ + u32 enables = 0; + + DBGP ( "%s\n", __func__ ); + bp->num_cmpl_rings = DEFAULT_NUMBER_OF_CMPL_RINGS; + bp->num_tx_rings = DEFAULT_NUMBER_OF_TX_RINGS; + bp->num_rx_rings = DEFAULT_NUMBER_OF_RX_RINGS; + bp->num_hw_ring_grps = DEFAULT_NUMBER_OF_RING_GRPS; + bp->num_stat_ctxs = DEFAULT_NUMBER_OF_STAT_CTXS; + + if ( bp->min_cp_rings <= DEFAULT_NUMBER_OF_CMPL_RINGS ) + bp->num_cmpl_rings = bp->min_cp_rings; + + if ( bp->min_tx_rings <= DEFAULT_NUMBER_OF_TX_RINGS ) + bp->num_tx_rings = bp->min_tx_rings; + + if ( bp->min_rx_rings <= DEFAULT_NUMBER_OF_RX_RINGS ) + bp->num_rx_rings = bp->min_rx_rings; + + if ( bp->min_hw_ring_grps <= DEFAULT_NUMBER_OF_RING_GRPS ) + bp->num_hw_ring_grps = bp->min_hw_ring_grps; + + if ( bp->min_stat_ctxs <= DEFAULT_NUMBER_OF_STAT_CTXS ) + bp->num_stat_ctxs = bp->min_stat_ctxs; + + dbg_num_rings ( bp ); + enables = ( FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS ); + return enables; +} + +static void bnxt_hwrm_assign_resources ( struct bnxt *bp ) +{ + struct hwrm_func_cfg_input *req; + u32 enables = 0; + + DBGP ( "%s\n", __func__ ); + if ( FLAG_TEST ( bp->flags, BNXT_FLAG_RESOURCE_QCAPS_SUPPORT ) ) + enables = bnxt_set_ring_info ( bp ); + + req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req; + req->num_cmpl_rings = bp->num_cmpl_rings; + req->num_tx_rings = bp->num_tx_rings; + req->num_rx_rings = bp->num_rx_rings; + req->num_stat_ctxs = bp->num_stat_ctxs; + req->num_hw_ring_grps = bp->num_hw_ring_grps; + req->enables = 
enables; +} + +static int bnxt_hwrm_func_qcaps_req ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_qcaps_input ); + struct hwrm_func_qcaps_input *req; + struct hwrm_func_qcaps_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + if ( bp->vf ) + return STATUS_SUCCESS; + + req = ( struct hwrm_func_qcaps_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_func_qcaps_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_QCAPS, cmd_len ); + req->fid = ( u16 )HWRM_NA_SIGNATURE; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + bp->fid = resp->fid; + bp->port_idx = ( u8 )resp->port_id; + + /* Get MAC address for this PF */ + memcpy ( &bp->mac_addr[0], &resp->mac_address[0], ETH_ALEN ); + dbg_func_qcaps ( bp ); + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_func_qcfg_req ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_qcfg_input ); + struct hwrm_func_qcfg_input *req; + struct hwrm_func_qcfg_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_func_qcfg_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_func_qcfg_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_QCFG, cmd_len ); + req->fid = ( u16 )HWRM_NA_SIGNATURE; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + if ( resp->flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST ) + FLAG_SET ( bp->flags, BNXT_FLAG_MULTI_HOST ); + + if ( resp->port_partition_type & + FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 ) + FLAG_SET ( bp->flags, BNXT_FLAG_NPAR_MODE ); + + bp->ordinal_value = ( u8 )resp->pci_id & 0x0F; + bp->stat_ctx_id = resp->stat_ctx_id; + + /* If VF is set to TRUE, then use some data from func_qcfg ( ). 
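Note that HWRM_FUNC_QCAPS is skipped for VFs in bnxt_hwrm_func_qcaps_req ( ) above, so the VF MAC address and VLAN are taken from this FUNC_QCFG response instead.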
*/ + if ( bp->vf ) { + bp->fid = resp->fid; + bp->port_idx = ( u8 )resp->port_id; + bp->vlan_id = resp->vlan; + + /* Get MAC address for this VF */ + memcpy ( bp->mac_addr, resp->mac_address, ETH_ALEN ); + } + dbg_func_qcfg ( bp ); + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_func_reset_req ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_reset_input ); + struct hwrm_func_reset_input *req; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_func_reset_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_RESET, cmd_len ); + if ( !bp->vf ) + req->func_reset_level = FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME; + + return wait_resp ( bp, HWRM_CMD_WAIT ( 6 ), cmd_len, __func__ ); +} + +static int bnxt_hwrm_func_cfg_req ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_cfg_input ); + struct hwrm_func_cfg_input *req; + + DBGP ( "%s\n", __func__ ); + if ( bp->vf ) + return STATUS_SUCCESS; + + req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_CFG, cmd_len ); + req->fid = ( u16 )HWRM_NA_SIGNATURE; + bnxt_hwrm_assign_resources ( bp ); + if ( bp->thor ) { + req->enables |= ( FUNC_CFG_REQ_ENABLES_NUM_MSIX | + FUNC_CFG_REQ_ENABLES_NUM_VNICS | + FUNC_CFG_REQ_ENABLES_EVB_MODE ); + req->num_msix = 1; + req->num_vnics = 1; + req->evb_mode = FUNC_CFG_REQ_EVB_MODE_NO_EVB; + } + return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); +} + +static int bnxt_hwrm_func_drv_rgtr ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_drv_rgtr_input ); + struct hwrm_func_drv_rgtr_input *req; + int rc; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_func_drv_rgtr_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_DRV_RGTR, cmd_len ); + + /* Register with HWRM */ + req->enables = FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | + FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD | + FUNC_DRV_RGTR_REQ_ENABLES_VER; + req->async_event_fwd[0] |= 0x01; + req->os_type = FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER; + req->ver_maj = IPXE_VERSION_MAJOR; + req->ver_min = IPXE_VERSION_MINOR; + req->ver_upd = IPXE_VERSION_UPDATE; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + FLAG_SET ( bp->flag_hwrm, VALID_DRIVER_REG ); + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_func_drv_unrgtr ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_drv_unrgtr_input ); + struct hwrm_func_drv_unrgtr_input *req; + int rc; + + DBGP ( "%s\n", __func__ ); + if ( ! 
( FLAG_TEST ( bp->flag_hwrm, VALID_DRIVER_REG ) ) ) + return STATUS_SUCCESS; + + req = ( struct hwrm_func_drv_unrgtr_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_DRV_UNRGTR, cmd_len ); + req->flags = FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) + return STATUS_FAILURE; + + FLAG_RESET ( bp->flag_hwrm, VALID_DRIVER_REG ); + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_set_async_event ( struct bnxt *bp ) +{ + int rc; + u16 idx; + + DBGP ( "%s\n", __func__ ); + if ( bp->thor ) + idx = bp->nq_ring_id; + else + idx = bp->cq_ring_id; + if ( bp->vf ) { + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_vf_cfg_input ); + struct hwrm_func_vf_cfg_input *req; + + req = ( struct hwrm_func_vf_cfg_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_VF_CFG, + cmd_len ); + req->enables = VF_CFG_ENABLE_FLAGS; + req->async_event_cr = idx; + req->mtu = bp->mtu; + req->guest_vlan = bp->vlan_id; + memcpy ( ( char * )&req->dflt_mac_addr[0], bp->mac_addr, + ETH_ALEN ); + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + } else { + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_cfg_input ); + struct hwrm_func_cfg_input *req; + + req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_CFG, cmd_len ); + req->fid = ( u16 )HWRM_NA_SIGNATURE; + req->enables = FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR; + req->async_event_cr = idx; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + } + return rc; +} + +static int bnxt_hwrm_cfa_l2_filter_alloc ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_filter_alloc_input ); + struct hwrm_cfa_l2_filter_alloc_input *req; + struct hwrm_cfa_l2_filter_alloc_output *resp; + int rc; + u32 flags = CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX; + u32 enables; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_cfa_l2_filter_alloc_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_cfa_l2_filter_alloc_output * )bp->hwrm_addr_resp; + if ( bp->vf ) + flags |= CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST; + enables = CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | + CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | + CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK; + + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_FILTER_ALLOC, + cmd_len ); + req->flags = flags; + req->enables = enables; + memcpy ( ( char * )&req->l2_addr[0], ( char * )&bp->mac_addr[0], + ETH_ALEN ); + memset ( ( char * )&req->l2_addr_mask[0], 0xff, ETH_ALEN ); + if ( !bp->vf ) { + memcpy ( ( char * )&req->t_l2_addr[0], bp->mac_addr, ETH_ALEN ); + memset ( ( char * )&req->t_l2_addr_mask[0], 0xff, ETH_ALEN ); + } + req->src_type = CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT; + req->src_id = ( u32 )bp->port_idx; + req->dst_id = bp->vnic_id; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) + return STATUS_FAILURE; + + FLAG_SET ( bp->flag_hwrm, VALID_L2_FILTER ); + bp->l2_filter_id = resp->l2_filter_id; + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_cfa_l2_filter_free ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_filter_free_input ); + struct hwrm_cfa_l2_filter_free_input *req; + int rc; + + DBGP ( "%s\n", __func__ ); + if ( ! 
( FLAG_TEST ( bp->flag_hwrm, VALID_L2_FILTER ) ) ) + return STATUS_SUCCESS; + + req = ( struct hwrm_cfa_l2_filter_free_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_FILTER_FREE, + cmd_len ); + req->l2_filter_id = bp->l2_filter_id; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + FLAG_RESET ( bp->flag_hwrm, VALID_L2_FILTER ); + return STATUS_SUCCESS; +} + +u32 set_rx_mask ( u32 rx_mask ) +{ + u32 mask = 0; + + if ( !rx_mask ) + return mask; + + mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; + if ( rx_mask != RX_MASK_ACCEPT_NONE ) { + if ( rx_mask & RX_MASK_ACCEPT_MULTICAST ) + mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; + if ( rx_mask & RX_MASK_ACCEPT_ALL_MULTICAST ) + mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + if ( rx_mask & RX_MASK_PROMISCUOUS_MODE ) + mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + } + return mask; +} + +static int bnxt_hwrm_set_rx_mask ( struct bnxt *bp, u32 rx_mask ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_cfa_l2_set_rx_mask_input ); + struct hwrm_cfa_l2_set_rx_mask_input *req; + u32 mask = set_rx_mask ( rx_mask ); + + req = ( struct hwrm_cfa_l2_set_rx_mask_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_SET_RX_MASK, + cmd_len ); + req->vnic_id = bp->vnic_id; + req->mask = mask; + + return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); +} + +static int bnxt_hwrm_port_phy_qcfg ( struct bnxt *bp, u16 idx ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_phy_qcfg_input ); + struct hwrm_port_phy_qcfg_input *req; + struct hwrm_port_phy_qcfg_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_port_phy_qcfg_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_port_phy_qcfg_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_QCFG, cmd_len ); + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + if ( idx & SUPPORT_SPEEDS ) + bp->support_speeds = resp->support_speeds; + + if ( idx & DETECT_MEDIA ) + bp->media_detect = resp->module_status; + + if ( idx & PHY_SPEED ) + bp->current_link_speed = resp->link_speed; + + if ( idx & PHY_STATUS ) { + if ( resp->link == PORT_PHY_QCFG_RESP_LINK_LINK ) + bp->link_status = STATUS_LINK_ACTIVE; + else + bp->link_status = STATUS_LINK_DOWN; + } + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_nvm_get_variable_req ( struct bnxt *bp, + u16 data_len, u16 option_num, u16 dimensions, u16 index_0 ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_nvm_get_variable_input ); + struct hwrm_nvm_get_variable_input *req; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_nvm_get_variable_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_NVM_GET_VARIABLE, cmd_len ); + req->dest_data_addr = bp->dma_addr_mapping; + req->data_len = data_len; + req->option_num = option_num; + req->dimensions = dimensions; + req->index_0 = index_0; + return wait_resp ( bp, + HWRM_CMD_FLASH_MULTIPLAYER ( bp->hwrm_cmd_timeout ), + cmd_len, __func__ ); +} + +static int bnxt_get_link_speed ( struct bnxt *bp ) +{ + u32 *ptr32 = ( u32 * )bp->hwrm_addr_dma; + + DBGP ( "%s\n", __func__ ); + test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 4, + ( u16 )LINK_SPEED_DRV_NUM, + 1, ( u16 )bp->port_idx ) != STATUS_SUCCESS ) + return STATUS_FAILURE; + bp->link_set = SET_LINK ( *ptr32, SPEED_DRV_MASK, SPEED_DRV_SHIFT ); + 
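/* A minimal sketch of the packing assumed here: SET_LINK ( ) and the
 * *_MASK / *_SHIFT constants come from bnxt.h (not part of this hunk),
 * presumably a mask-and-shift helper along the lines of
 *
 *     #define SET_LINK( val, mask, shift ) \
 *             ( ( ( val ) & ( mask ) ) << ( shift ) )
 *
 * so each HWRM_NVM_GET_VARIABLE result below is folded into
 * bp->link_set as its own bit-field.  The exact macro body is an
 * assumption, not taken from this patch.
 */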
test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 4, + ( u16 )LINK_SPEED_FW_NUM, + 1, ( u16 )bp->port_idx ) != STATUS_SUCCESS ) + return STATUS_FAILURE; + bp->link_set |= SET_LINK ( *ptr32, SPEED_FW_MASK, SPEED_FW_SHIFT ); + test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 4, + ( u16 )D3_LINK_SPEED_FW_NUM, 1, + ( u16 )bp->port_idx ) != STATUS_SUCCESS ) + return STATUS_FAILURE; + bp->link_set |= SET_LINK ( *ptr32, D3_SPEED_FW_MASK, + D3_SPEED_FW_SHIFT ); + test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 1, + ( u16 )PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_NUM, + 1, ( u16 )bp->port_idx ) != STATUS_SUCCESS ) + return STATUS_FAILURE; + bp->link_set |= SET_LINK ( *ptr32, + MEDIA_AUTO_DETECT_MASK, MEDIA_AUTO_DETECT_SHIFT ); + + switch ( bp->link_set & LINK_SPEED_DRV_MASK ) { + case LINK_SPEED_DRV_1G: + bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_1000MBPS ); + break; + case LINK_SPEED_DRV_2_5G: + bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_2500MBPS ); + break; + case LINK_SPEED_DRV_10G: + bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_10GBPS ); + break; + case LINK_SPEED_DRV_25G: + bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_25GBPS ); + break; + case LINK_SPEED_DRV_40G: + bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_40GBPS ); + break; + case LINK_SPEED_DRV_50G: + bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_50GBPS ); + break; + case LINK_SPEED_DRV_100G: + bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_100GBPS ); + break; + case LINK_SPEED_DRV_200G: + bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_200GBPS ); + break; + case LINK_SPEED_DRV_AUTONEG: + bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_AUTONEG ); + break; + default: + bp->medium = SET_MEDIUM_DUPLEX ( bp, MEDIUM_FULL_DUPLEX ); + break; + } + prn_set_speed ( bp->link_set ); + return STATUS_SUCCESS; +} + +static int bnxt_get_vlan ( struct bnxt *bp ) +{ + u32 *ptr32 = ( u32 * )bp->hwrm_addr_dma; + + /* If VF is set to TRUE, Do not issue this command */ + if ( bp->vf ) + return STATUS_SUCCESS; + + test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 1, + ( u16 )FUNC_CFG_PRE_BOOT_MBA_VLAN_NUM, 1, + ( u16 )bp->ordinal_value ) != STATUS_SUCCESS ) + return STATUS_FAILURE; + + bp->mba_cfg2 = SET_MBA ( *ptr32, VLAN_MASK, VLAN_SHIFT ); + test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 16, + ( u16 )FUNC_CFG_PRE_BOOT_MBA_VLAN_VALUE_NUM, 1, + ( u16 )bp->ordinal_value ) != STATUS_SUCCESS ) + return STATUS_FAILURE; + + bp->mba_cfg2 |= SET_MBA ( *ptr32, VLAN_VALUE_MASK, VLAN_VALUE_SHIFT ); + if ( bp->mba_cfg2 & FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED ) + bp->vlan_id = bp->mba_cfg2 & VLAN_VALUE_MASK; + else + bp->vlan_id = 0; + + if ( bp->mba_cfg2 & FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED ) + DBGP ( "VLAN MBA Enabled ( %d )\n", + ( bp->mba_cfg2 & VLAN_VALUE_MASK ) ); + + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_backing_store_qcfg ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_backing_store_qcfg_input ); + struct hwrm_func_backing_store_qcfg_input *req; + + DBGP ( "%s\n", __func__ ); + if ( !bp->thor ) + return STATUS_SUCCESS; + + req = ( struct hwrm_func_backing_store_qcfg_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_BACKING_STORE_QCFG, + cmd_len ); + return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); +} + +static int bnxt_hwrm_backing_store_cfg ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_backing_store_cfg_input ); + struct hwrm_func_backing_store_cfg_input *req; + + DBGP ( "%s\n", __func__ ); + if ( !bp->thor ) + return STATUS_SUCCESS; + + 
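/* Every bnxt_hwrm_* ( ) helper in this file follows the same request
 * pattern built on hwrm_init ( ) and wait_resp ( ) defined above:
 * initialise the request buffer, fill in the command-specific fields,
 * then poll the response buffer for a matching sequence id.  Roughly:
 *
 *     hwrm_init ( bp, ( void * )req, ( u16 )HWRM_<CMD>, cmd_len );
 *     ... set request fields ...
 *     rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
 *
 * where HWRM_<CMD> stands for the per-command opcode and is not itself
 * a real identifier.
 */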
req = ( struct hwrm_func_backing_store_cfg_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_BACKING_STORE_CFG, + cmd_len ); + req->flags = FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE; + req->enables = 0; + return wait_resp ( bp, HWRM_CMD_WAIT ( 6 ), cmd_len, __func__ ); +} + +static int bnxt_hwrm_queue_qportcfg ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_queue_qportcfg_input ); + struct hwrm_queue_qportcfg_input *req; + struct hwrm_queue_qportcfg_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + if ( !bp->thor ) + return STATUS_SUCCESS; + + req = ( struct hwrm_queue_qportcfg_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_queue_qportcfg_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_QUEUE_QPORTCFG, cmd_len ); + req->flags = 0; + req->port_id = 0; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + bp->queue_id = resp->queue_id0; + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_port_mac_cfg ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_mac_cfg_input ); + struct hwrm_port_mac_cfg_input *req; + + DBGP ( "%s\n", __func__ ); + if ( bp->vf ) + return STATUS_SUCCESS; + + req = ( struct hwrm_port_mac_cfg_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_MAC_CFG, cmd_len ); + req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE; + return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); +} + +static int bnxt_hwrm_port_phy_cfg ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_phy_cfg_input ); + struct hwrm_port_phy_cfg_input *req; + u32 flags; + u32 enables = 0; + u16 force_link_speed = 0; + u16 auto_link_speed_mask = 0; + u8 auto_mode = 0; + u8 auto_pause = 0; + u8 auto_duplex = 0; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_port_phy_cfg_input * )bp->hwrm_addr_req; + flags = PORT_PHY_CFG_REQ_FLAGS_FORCE | + PORT_PHY_CFG_REQ_FLAGS_RESET_PHY; + + switch ( GET_MEDIUM_SPEED ( bp->medium ) ) { + case MEDIUM_SPEED_1000MBPS: + force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; + break; + case MEDIUM_SPEED_10GBPS: + force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; + break; + case MEDIUM_SPEED_25GBPS: + force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; + break; + case MEDIUM_SPEED_40GBPS: + force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; + break; + case MEDIUM_SPEED_50GBPS: + force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; + break; + case MEDIUM_SPEED_100GBPS: + force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB; + break; + case MEDIUM_SPEED_200GBPS: + force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_200GB; + break; + default: + auto_mode = PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; + flags &= ~PORT_PHY_CFG_REQ_FLAGS_FORCE; + enables |= PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE | + PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK | + PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX | + PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE; + auto_pause = PORT_PHY_CFG_REQ_AUTO_PAUSE_TX | + PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; + auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH; + auto_link_speed_mask = bp->support_speeds; + break; + } + + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_CFG, cmd_len ); + req->flags = flags; + req->enables = enables; + req->port_id = bp->port_idx; + req->force_link_speed = force_link_speed; + req->auto_mode = auto_mode; + req->auto_duplex = auto_duplex; + req->auto_pause = auto_pause; + 
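/* Summary of the switch above: a specific configured speed keeps
 * PORT_PHY_CFG_REQ_FLAGS_FORCE and sets force_link_speed, whereas the
 * default ( autoneg ) case clears the FORCE flag and instead enables
 * auto_mode / auto_duplex / auto_pause together with the speed mask
 * previously read into bp->support_speeds by HWRM_PORT_PHY_QCFG.
 */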
req->auto_link_speed_mask = auto_link_speed_mask; + + return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); +} + +static int bnxt_query_phy_link ( struct bnxt *bp ) +{ + u16 flag = PHY_STATUS | PHY_SPEED | DETECT_MEDIA; + + DBGP ( "%s\n", __func__ ); + /* Query Link Status */ + if ( bnxt_hwrm_port_phy_qcfg ( bp, QCFG_PHY_ALL ) != STATUS_SUCCESS ) { + return STATUS_FAILURE; + } + + if ( bp->link_status == STATUS_LINK_ACTIVE ) + return STATUS_SUCCESS; + + /* If VF is set to TRUE, Do not issue the following commands */ + if ( bp->vf ) + return STATUS_SUCCESS; + + /* If multi_host or NPAR, Do not issue bnxt_get_link_speed */ + if ( FLAG_TEST ( bp->flags, PORT_PHY_FLAGS ) ) { + dbg_flags ( __func__, bp->flags ); + return STATUS_SUCCESS; + } + + /* HWRM_NVM_GET_VARIABLE - speed */ + if ( bnxt_get_link_speed ( bp ) != STATUS_SUCCESS ) { + return STATUS_FAILURE; + } + + /* Configure link if it is not up */ + bnxt_hwrm_port_phy_cfg ( bp ); + + /* refresh link speed values after bringing link up */ + return bnxt_hwrm_port_phy_qcfg ( bp, flag ); +} + +static int bnxt_get_phy_link ( struct bnxt *bp ) +{ + u16 i; + u16 flag = PHY_STATUS | PHY_SPEED | DETECT_MEDIA; + + DBGP ( "%s\n", __func__ ); + dbg_chip_info ( bp ); + for ( i = 0; i < ( bp->wait_link_timeout / 100 ); i++ ) { + if ( bnxt_hwrm_port_phy_qcfg ( bp, flag ) != STATUS_SUCCESS ) + break; + + if ( bp->link_status == STATUS_LINK_ACTIVE ) + break; + +// if ( bp->media_detect ) +// break; + mdelay ( LINK_POLL_WAIT_TIME ); + } + dbg_link_state ( bp, ( u32 ) ( ( i + 1 ) * 100 ) ); + bnxt_set_link ( bp ); + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_stat_ctx_alloc ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_stat_ctx_alloc_input ); + struct hwrm_stat_ctx_alloc_input *req; + struct hwrm_stat_ctx_alloc_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_stat_ctx_alloc_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_stat_ctx_alloc_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_STAT_CTX_ALLOC, cmd_len ); + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + FLAG_SET ( bp->flag_hwrm, VALID_STAT_CTX ); + bp->stat_ctx_id = ( u16 )resp->stat_ctx_id; + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_stat_ctx_free ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_stat_ctx_free_input ); + struct hwrm_stat_ctx_free_input *req; + int rc; + + DBGP ( "%s\n", __func__ ); + if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_STAT_CTX ) ) ) + return STATUS_SUCCESS; + + req = ( struct hwrm_stat_ctx_free_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_STAT_CTX_FREE, cmd_len ); + req->stat_ctx_id = ( u32 )bp->stat_ctx_id; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + FLAG_RESET ( bp->flag_hwrm, VALID_STAT_CTX ); + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_ring_free_grp ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_grp_free_input ); + struct hwrm_ring_grp_free_input *req; + int rc; + + DBGP ( "%s\n", __func__ ); + if ( ! 
( FLAG_TEST ( bp->flag_hwrm, VALID_RING_GRP ) ) ) + return STATUS_SUCCESS; + + req = ( struct hwrm_ring_grp_free_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_GRP_FREE, cmd_len ); + req->ring_group_id = ( u32 )bp->ring_grp_id; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + FLAG_RESET ( bp->flag_hwrm, VALID_RING_GRP ); + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_ring_alloc_grp ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_grp_alloc_input ); + struct hwrm_ring_grp_alloc_input *req; + struct hwrm_ring_grp_alloc_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + if ( bp->thor ) + return STATUS_SUCCESS; + + req = ( struct hwrm_ring_grp_alloc_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_ring_grp_alloc_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_GRP_ALLOC, cmd_len ); + req->cr = bp->cq_ring_id; + req->rr = bp->rx_ring_id; + req->ar = ( u16 )HWRM_NA_SIGNATURE; + if ( bp->vf ) + req->sc = bp->stat_ctx_id; + + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + FLAG_SET ( bp->flag_hwrm, VALID_RING_GRP ); + bp->ring_grp_id = ( u16 )resp->ring_group_id; + return STATUS_SUCCESS; +} + +int bnxt_hwrm_ring_free ( struct bnxt *bp, u16 ring_id, u8 ring_type ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_free_input ); + struct hwrm_ring_free_input *req; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_ring_free_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_FREE, cmd_len ); + req->ring_type = ring_type; + req->ring_id = ring_id; + return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); +} + +static int bnxt_hwrm_ring_alloc ( struct bnxt *bp, u8 type ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_ring_alloc_input ); + struct hwrm_ring_alloc_input *req; + struct hwrm_ring_alloc_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_ring_alloc_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_ring_alloc_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_ALLOC, cmd_len ); + req->ring_type = type; + switch ( type ) { + case RING_ALLOC_REQ_RING_TYPE_NQ: + req->page_size = LM_PAGE_BITS ( 12 ); + req->int_mode = BNXT_CQ_INTR_MODE ( bp->vf ); + req->length = ( u32 )bp->nq.ring_cnt; + req->logical_id = 0xFFFF; // Required value for Thor FW? 
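/* The ring id returned for each ring type is recorded at the end of
 * this function ( bp->nq_ring_id, bp->cq_ring_id, bp->tx_ring_id,
 * bp->rx_ring_id ) and later forms the xid half of the Thor doorbell
 * value written by thor_db ( ) above.
 */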
+ req->page_tbl_addr = virt_to_bus ( bp->nq.bd_virt ); + break; + case RING_ALLOC_REQ_RING_TYPE_L2_CMPL: + req->page_size = LM_PAGE_BITS ( 8 ); + req->int_mode = BNXT_CQ_INTR_MODE ( bp->vf ); + req->length = ( u32 )bp->cq.ring_cnt; + req->page_tbl_addr = virt_to_bus ( bp->cq.bd_virt ); + if ( !bp->thor ) + break; + req->enables = RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID; + req->nq_ring_id = bp->nq_ring_id; + req->cq_handle = ( u64 )bp->nq_ring_id; + break; + case RING_ALLOC_REQ_RING_TYPE_TX: + req->page_size = LM_PAGE_BITS ( 8 ); + req->int_mode = RING_ALLOC_REQ_INT_MODE_POLL; + req->length = ( u32 )bp->tx.ring_cnt; + req->queue_id = TX_RING_QID; + req->stat_ctx_id = ( u32 )bp->stat_ctx_id; + req->cmpl_ring_id = bp->cq_ring_id; + req->page_tbl_addr = virt_to_bus ( bp->tx.bd_virt ); + break; + case RING_ALLOC_REQ_RING_TYPE_RX: + req->page_size = LM_PAGE_BITS ( 8 ); + req->int_mode = RING_ALLOC_REQ_INT_MODE_POLL; + req->length = ( u32 )bp->rx.ring_cnt; + req->stat_ctx_id = ( u32 )STAT_CTX_ID; + req->cmpl_ring_id = bp->cq_ring_id; + req->page_tbl_addr = virt_to_bus ( bp->rx.bd_virt ); + if ( !bp->thor ) + break; + req->queue_id = ( u16 )RX_RING_QID; + req->rx_buf_size = MAX_ETHERNET_PACKET_BUFFER_SIZE; + req->enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID; + break; + default: + return STATUS_SUCCESS; + } + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed, type = %x\n", __func__, type ); + return STATUS_FAILURE; + } + + if ( type == RING_ALLOC_REQ_RING_TYPE_L2_CMPL ) { + FLAG_SET ( bp->flag_hwrm, VALID_RING_CQ ); + bp->cq_ring_id = resp->ring_id; + } else if ( type == RING_ALLOC_REQ_RING_TYPE_TX ) { + FLAG_SET ( bp->flag_hwrm, VALID_RING_TX ); + bp->tx_ring_id = resp->ring_id; + } else if ( type == RING_ALLOC_REQ_RING_TYPE_RX ) { + FLAG_SET ( bp->flag_hwrm, VALID_RING_RX ); + bp->rx_ring_id = resp->ring_id; + } else if ( type == RING_ALLOC_REQ_RING_TYPE_NQ ) { + FLAG_SET ( bp->flag_hwrm, VALID_RING_NQ ); + bp->nq_ring_id = resp->ring_id; + } + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_ring_alloc_cq ( struct bnxt *bp ) +{ + DBGP ( "%s\n", __func__ ); + return bnxt_hwrm_ring_alloc ( bp, RING_ALLOC_REQ_RING_TYPE_L2_CMPL ); +} + +static int bnxt_hwrm_ring_alloc_tx ( struct bnxt *bp ) +{ + DBGP ( "%s\n", __func__ ); + return bnxt_hwrm_ring_alloc ( bp, RING_ALLOC_REQ_RING_TYPE_TX ); +} + +static int bnxt_hwrm_ring_alloc_rx ( struct bnxt *bp ) +{ + DBGP ( "%s\n", __func__ ); + return bnxt_hwrm_ring_alloc ( bp, RING_ALLOC_REQ_RING_TYPE_RX ); +} + +static int bnxt_hwrm_ring_free_cq ( struct bnxt *bp ) +{ + int ret = STATUS_SUCCESS; + + DBGP ( "%s\n", __func__ ); + if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_CQ ) ) ) + return ret; + + ret = RING_FREE ( bp, bp->cq_ring_id, RING_FREE_REQ_RING_TYPE_L2_CMPL ); + if ( ret == STATUS_SUCCESS ) + FLAG_RESET ( bp->flag_hwrm, VALID_RING_CQ ); + + return ret; +} + +static int bnxt_hwrm_ring_free_tx ( struct bnxt *bp ) +{ + int ret = STATUS_SUCCESS; + + DBGP ( "%s\n", __func__ ); + if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_TX ) ) ) + return ret; + + ret = RING_FREE ( bp, bp->tx_ring_id, RING_FREE_REQ_RING_TYPE_TX ); + if ( ret == STATUS_SUCCESS ) + FLAG_RESET ( bp->flag_hwrm, VALID_RING_TX ); + + return ret; +} + +static int bnxt_hwrm_ring_free_rx ( struct bnxt *bp ) +{ + int ret = STATUS_SUCCESS; + + DBGP ( "%s\n", __func__ ); + if ( ! 
( FLAG_TEST ( bp->flag_hwrm, VALID_RING_RX ) ) ) + return ret; + + ret = RING_FREE ( bp, bp->rx_ring_id, RING_FREE_REQ_RING_TYPE_RX ); + if ( ret == STATUS_SUCCESS ) + FLAG_RESET ( bp->flag_hwrm, VALID_RING_RX ); + + return ret; +} + +static int bnxt_hwrm_ring_alloc_nq ( struct bnxt *bp ) +{ + if ( !bp->thor ) + return STATUS_SUCCESS; + return bnxt_hwrm_ring_alloc ( bp, RING_ALLOC_REQ_RING_TYPE_NQ ); +} + +static int bnxt_hwrm_ring_free_nq ( struct bnxt *bp ) +{ + int ret = STATUS_SUCCESS; + + if ( !bp->thor ) + return STATUS_SUCCESS; + + DBGP ( "%s\n", __func__ ); + if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_NQ ) ) ) + return ret; + + ret = RING_FREE ( bp, bp->nq_ring_id, RING_FREE_REQ_RING_TYPE_NQ ); + if ( ret == STATUS_SUCCESS ) + FLAG_RESET ( bp->flag_hwrm, VALID_RING_NQ ); + + return ret; +} + +static int bnxt_hwrm_vnic_alloc ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_alloc_input ); + struct hwrm_vnic_alloc_input *req; + struct hwrm_vnic_alloc_output *resp; + int rc; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_vnic_alloc_input * )bp->hwrm_addr_req; + resp = ( struct hwrm_vnic_alloc_output * )bp->hwrm_addr_resp; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_ALLOC, cmd_len ); + req->flags = VNIC_ALLOC_REQ_FLAGS_DEFAULT; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + FLAG_SET ( bp->flag_hwrm, VALID_VNIC_ID ); + bp->vnic_id = resp->vnic_id; + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_vnic_free ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_free_input ); + struct hwrm_vnic_free_input *req; + int rc; + + DBGP ( "%s\n", __func__ ); + if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_VNIC_ID ) ) ) + return STATUS_SUCCESS; + + req = ( struct hwrm_vnic_free_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_FREE, cmd_len ); + req->vnic_id = bp->vnic_id; + rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + FLAG_RESET ( bp->flag_hwrm, VALID_VNIC_ID ); + return STATUS_SUCCESS; +} + +static int bnxt_hwrm_vnic_cfg ( struct bnxt *bp ) +{ + u16 cmd_len = ( u16 )sizeof ( struct hwrm_vnic_cfg_input ); + struct hwrm_vnic_cfg_input *req; + + DBGP ( "%s\n", __func__ ); + req = ( struct hwrm_vnic_cfg_input * )bp->hwrm_addr_req; + hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_CFG, cmd_len ); + req->enables = VNIC_CFG_REQ_ENABLES_MRU; + req->mru = bp->mtu; + + if ( bp->thor ) { + req->enables |= ( VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | + VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID ); + req->default_rx_ring_id = bp->rx_ring_id; + req->default_cmpl_ring_id = bp->cq_ring_id; + } else { + req->enables |= VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP; + req->dflt_ring_grp = bp->ring_grp_id; + } + + req->flags = VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE; + req->vnic_id = bp->vnic_id; + return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); +} + +static int bnxt_set_rx_mask ( struct bnxt *bp ) +{ + return bnxt_hwrm_set_rx_mask ( bp, RX_MASK ); +} + +static int bnxt_reset_rx_mask ( struct bnxt *bp ) +{ + return bnxt_hwrm_set_rx_mask ( bp, 0 ); +} + +typedef int ( *hwrm_func_t ) ( struct bnxt *bp ); + +hwrm_func_t bring_down_chip[] = { + bnxt_hwrm_func_drv_unrgtr, /* HWRM_FUNC_DRV_UNRGTR */ + NULL, +}; + +hwrm_func_t bring_down_nic[] = { + bnxt_hwrm_cfa_l2_filter_free, /* HWRM_CFA_L2_FILTER_FREE */ + 
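/* These NULL-terminated hwrm_func_t tables are executed in order by
 * bnxt_hwrm_run ( ) below; bnxt_open ( ) runs bring_up_nic via the
 * bnxt_up_nic ( ) macro, and the teardown table here is roughly the
 * inverse of the bring-up sequence ( filters and VNIC first, rings and
 * stat context last ).
 */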
bnxt_reset_rx_mask, + bnxt_hwrm_vnic_cfg, /* HWRM_VNIC_CFG */ + bnxt_free_rx_iob, /* HWRM_FREE_IOB */ + bnxt_hwrm_vnic_free, /* HWRM_VNIC_FREE */ + bnxt_hwrm_ring_free_grp, /* HWRM_RING_GRP_FREE */ + bnxt_hwrm_ring_free_rx, /* HWRM_RING_FREE - RX Ring */ + bnxt_hwrm_ring_free_tx, /* HWRM_RING_FREE - TX Ring */ + bnxt_hwrm_stat_ctx_free, /* HWRM_STAT_CTX_FREE */ + bnxt_hwrm_ring_free_cq, /* HWRM_RING_FREE - CQ Ring */ + bnxt_hwrm_ring_free_nq, /* HWRM_RING_FREE - NQ Ring */ + NULL, +}; +hwrm_func_t bring_up_chip[] = { + bnxt_hwrm_ver_get, /* HWRM_VER_GET */ + bnxt_hwrm_func_reset_req, /* HWRM_FUNC_RESET */ + bnxt_hwrm_func_drv_rgtr, /* HWRM_FUNC_DRV_RGTR */ + bnxt_hwrm_func_qcaps_req, /* HWRM_FUNC_QCAPS */ + bnxt_hwrm_backing_store_cfg, /* HWRM_FUNC_BACKING_STORE_CFG */ + bnxt_hwrm_backing_store_qcfg, /* HWRM_FUNC_BACKING_STORE_QCFG */ + bnxt_hwrm_func_resource_qcaps, /* HWRM_FUNC_RESOURCE_QCAPS */ + bnxt_hwrm_func_qcfg_req, /* HWRM_FUNC_QCFG */ + bnxt_get_vlan, /* HWRM_NVM_GET_VARIABLE - vlan */ + bnxt_hwrm_port_mac_cfg, /* HWRM_PORT_MAC_CFG */ + bnxt_hwrm_func_cfg_req, /* HWRM_FUNC_CFG */ + bnxt_query_phy_link, /* HWRM_PORT_PHY_QCFG */ + bnxt_get_device_address, /* HW MAC address */ + NULL, +}; + +hwrm_func_t bring_up_nic[] = { + bnxt_hwrm_stat_ctx_alloc, /* HWRM_STAT_CTX_ALLOC */ + bnxt_hwrm_queue_qportcfg, /* HWRM_QUEUE_QPORTCFG */ + bnxt_hwrm_ring_alloc_nq, /* HWRM_RING_ALLOC - NQ Ring */ + bnxt_hwrm_ring_alloc_cq, /* HWRM_RING_ALLOC - CQ Ring */ + bnxt_hwrm_ring_alloc_tx, /* HWRM_RING_ALLOC - TX Ring */ + bnxt_hwrm_ring_alloc_rx, /* HWRM_RING_ALLOC - RX Ring */ + bnxt_hwrm_ring_alloc_grp, /* HWRM_RING_GRP_ALLOC - Group */ + bnxt_hwrm_vnic_alloc, /* HWRM_VNIC_ALLOC */ + bnxt_post_rx_buffers, /* Post RX buffers */ + bnxt_hwrm_set_async_event, /* ENABLES_ASYNC_EVENT_CR */ + bnxt_hwrm_vnic_cfg, /* HWRM_VNIC_CFG */ + bnxt_hwrm_cfa_l2_filter_alloc, /* HWRM_CFA_L2_FILTER_ALLOC */ + bnxt_get_phy_link, /* HWRM_PORT_PHY_QCFG - PhyLink */ + bnxt_set_rx_mask, /* HWRM_CFA_L2_SET_RX_MASK */ + NULL, +}; + +int bnxt_hwrm_run ( hwrm_func_t cmds[], struct bnxt *bp ) +{ + hwrm_func_t *ptr; + int ret; + + for ( ptr = cmds; *ptr; ++ptr ) { + memset ( bp->hwrm_addr_req, 0, REQ_BUFFER_SIZE ); + memset ( bp->hwrm_addr_resp, 0, RESP_BUFFER_SIZE ); + ret = ( *ptr ) ( bp ); + if ( ret ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + } + return STATUS_SUCCESS; +} + +#define bnxt_down_chip( bp ) bnxt_hwrm_run ( bring_down_chip, bp ) +#define bnxt_up_chip( bp ) bnxt_hwrm_run ( bring_up_chip, bp ) +#define bnxt_down_nic( bp ) bnxt_hwrm_run ( bring_down_nic, bp ) +#define bnxt_up_nic( bp ) bnxt_hwrm_run ( bring_up_nic, bp ) + +static int bnxt_open ( struct net_device *dev ) +{ + struct bnxt *bp = netdev_priv ( dev ); + + DBGP ( "%s\n", __func__ ); + bnxt_mm_nic ( bp ); + return (bnxt_up_nic ( bp )); +} + +static void bnxt_tx_adjust_pkt ( struct bnxt *bp, struct io_buffer *iob ) +{ + u16 prev_len = iob_len ( iob ); + + bp->vlan_tx = bnxt_get_pkt_vlan ( ( char * )iob->data ); + if ( !bp->vlan_tx && bp->vlan_id ) + bnxt_add_vlan ( iob, bp->vlan_id ); + + dbg_tx_vlan ( bp, ( char * )iob->data, prev_len, iob_len ( iob ) ); + if ( iob_len ( iob ) != prev_len ) + prev_len = iob_len ( iob ); + + iob_pad ( iob, ETH_ZLEN ); + dbg_tx_pad ( prev_len, iob_len ( iob ) ); +} + +static int bnxt_tx ( struct net_device *dev, struct io_buffer *iob ) +{ + struct bnxt *bp = netdev_priv ( dev ); + u16 len, entry; + dma_addr_t mapping; + + if ( bnxt_tx_avail ( bp ) < 1 ) { + DBGP ( "- %s ( ): Failed no bd's 
available\n", __func__ ); + return -ENOBUFS; + } + + bnxt_tx_adjust_pkt ( bp, iob ); + entry = bp->tx.prod_id; + mapping = virt_to_bus ( iob->data ); + len = iob_len ( iob ); + bp->tx.iob[entry] = iob; + bnxt_set_txq ( bp, entry, mapping, len ); + entry = NEXT_IDX ( entry, bp->tx.ring_cnt ); + dump_tx_pkt ( ( u8 * )iob->data, len, bp->tx.prod_id ); + /* Packets are ready, update Tx producer idx local and on card. */ + bnxt_db_tx ( bp, ( u32 )entry ); + bp->tx.prod_id = entry; + bp->tx.cnt_req++; + /* memory barrier */ + mb ( ); + return 0; +} + +static void bnxt_adv_nq_index ( struct bnxt *bp, u16 cnt ) +{ + u16 cons_id; + + cons_id = bp->nq.cons_id + cnt; + if ( cons_id >= bp->nq.ring_cnt ) { + /* Toggle completion bit when the ring wraps. */ + bp->nq.completion_bit ^= 1; + cons_id = cons_id - bp->nq.ring_cnt; + } + bp->nq.cons_id = cons_id; +} + +void bnxt_link_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl *evt ) +{ + switch ( evt->event_id ) { + case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: + if ( evt->event_data1 & 0x01 ) + bp->link_status = STATUS_LINK_ACTIVE; + else + bp->link_status = STATUS_LINK_DOWN; + bnxt_set_link ( bp ); + dbg_link_status ( bp ); + break; + default: + break; + } +} + +static void bnxt_service_cq ( struct net_device *dev ) +{ + struct bnxt *bp = netdev_priv ( dev ); + struct cmpl_base *cmp; + struct tx_cmpl *tx; + u16 old_cid = bp->cq.cons_id; + int done = SERVICE_NEXT_CQ_BD; + u32 cq_type; + + while ( done == SERVICE_NEXT_CQ_BD ) { + cmp = ( struct cmpl_base * )BD_NOW ( bp->cq.bd_virt, + bp->cq.cons_id, + sizeof ( struct cmpl_base ) ); + + if ( ( cmp->info3_v & CMPL_BASE_V ) ^ bp->cq.completion_bit ) + break; + + cq_type = cmp->type & CMPL_BASE_TYPE_MASK; + dump_evt ( ( u8 * )cmp, cq_type, bp->cq.cons_id, 0 ); + dump_cq ( cmp, bp->cq.cons_id ); + + switch ( cq_type ) { + case CMPL_BASE_TYPE_TX_L2: + tx = ( struct tx_cmpl * )cmp; + bnxt_tx_complete ( dev, ( u16 )tx->opaque ); + /* Fall through */ + case CMPL_BASE_TYPE_STAT_EJECT: + bnxt_adv_cq_index ( bp, 1 ); + break; + case CMPL_BASE_TYPE_RX_L2: + done = bnxt_rx_complete ( dev, + ( struct rx_pkt_cmpl * )cmp ); + break; + case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: + bnxt_link_evt ( bp, + ( struct hwrm_async_event_cmpl * )cmp ); + bnxt_adv_cq_index ( bp, 1 ); + break; + default: + done = NO_MORE_CQ_BD_TO_SERVICE; + break; + } + } + + if ( bp->cq.cons_id != old_cid ) + bnxt_db_cq ( bp ); +} + +static void bnxt_service_nq ( struct net_device *dev ) +{ + struct bnxt *bp = netdev_priv ( dev ); + struct nq_base *nqp; + u16 old_cid = bp->nq.cons_id; + int done = SERVICE_NEXT_NQ_BD; + u32 nq_type; + + if ( !bp->thor ) + return; + + while ( done == SERVICE_NEXT_NQ_BD ) { + nqp = ( struct nq_base * )BD_NOW ( bp->nq.bd_virt, + bp->nq.cons_id, sizeof ( struct nq_base ) ); + if ( ( nqp->v & NQ_CN_V ) ^ bp->nq.completion_bit ) + break; + nq_type = ( nqp->type & NQ_CN_TYPE_MASK ); + dump_evt ( ( u8 * )nqp, nq_type, bp->nq.cons_id, 1 ); + dump_nq ( nqp, bp->nq.cons_id ); + + switch ( nq_type ) { + case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: + bnxt_link_evt ( bp, + ( struct hwrm_async_event_cmpl * )nqp ); + /* Fall through */ + case NQ_CN_TYPE_CQ_NOTIFICATION: + bnxt_adv_nq_index ( bp, 1 ); + break; + default: + done = NO_MORE_NQ_BD_TO_SERVICE; + break; + } + } + + if ( bp->nq.cons_id != old_cid ) + bnxt_db_nq ( bp ); +} + +static void bnxt_poll ( struct net_device *dev ) +{ + mb ( ); + bnxt_service_cq ( dev ); + bnxt_service_nq ( dev ); +} + +static void bnxt_close ( struct net_device *dev ) +{ + struct bnxt *bp = netdev_priv 
( dev ); + + DBGP ( "%s\n", __func__ ); + bnxt_down_nic ( bp ); + + /* iounmap PCI BAR ( s ) */ + bnxt_down_pci ( bp ); + + /* Get Bar Address */ + bp->bar0 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_0 ); + bp->bar1 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_2 ); + bp->bar2 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_4 ); + +} + +static struct net_device_operations bnxt_netdev_ops = { + .open = bnxt_open, + .close = bnxt_close, + .poll = bnxt_poll, + .transmit = bnxt_tx, +}; + +static int bnxt_init_one ( struct pci_device *pci ) +{ + struct net_device *netdev; + struct bnxt *bp; + int err = 0; + + DBGP ( "%s\n", __func__ ); + /* Allocate network device */ + netdev = alloc_etherdev ( sizeof ( *bp ) ); + if ( !netdev ) { + DBGP ( "- %s ( ): alloc_etherdev Failed\n", __func__ ); + err = -ENOMEM; + goto disable_pdev; + } + + /* Initialise network device */ + netdev_init ( netdev, &bnxt_netdev_ops ); + + /* Driver private area for this device */ + bp = netdev_priv ( netdev ); + + /* Set PCI driver private data */ + pci_set_drvdata ( pci, netdev ); + + /* Clear Private area data */ + memset ( bp, 0, sizeof ( *bp ) ); + bp->pdev = pci; + bp->dev = netdev; + netdev->dev = &pci->dev; + + /* Enable PCI device */ + adjust_pci_device ( pci ); + + /* Get PCI Information */ + bnxt_get_pci_info ( bp ); + + /* Allocate and Initialise device specific parameters */ + if ( bnxt_alloc_mem ( bp ) != 0 ) { + DBGP ( "- %s ( ): bnxt_alloc_mem Failed\n", __func__ ); + goto err_down_pci; + } + + /* Get device specific information */ + if ( bnxt_up_chip ( bp ) != 0 ) { + DBGP ( "- %s ( ): bnxt_up_chip Failed\n", __func__ ); + goto err_down_chip; + } + + /* Register Network device */ + if ( register_netdev ( netdev ) != 0 ) { + DBGP ( "- %s ( ): register_netdev Failed\n", __func__ ); + goto err_down_chip; + } + + return 0; + +err_down_chip: + bnxt_down_chip ( bp ); + bnxt_free_mem ( bp ); + +err_down_pci: + bnxt_down_pci ( bp ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); + +disable_pdev: + pci_set_drvdata ( pci, NULL ); + return err; +} + +static void bnxt_remove_one ( struct pci_device *pci ) +{ + struct net_device *netdev = pci_get_drvdata ( pci ); + struct bnxt *bp = netdev_priv ( netdev ); + + DBGP ( "%s\n", __func__ ); + /* Unregister network device */ + unregister_netdev ( netdev ); + + /* Bring down Chip */ + bnxt_down_chip ( bp ); + + /* Free allocated resources */ + bnxt_free_mem ( bp ); + + /* iounmap PCI BAR ( s ) */ + bnxt_down_pci ( bp ); + + /* Stop network device */ + netdev_nullify ( netdev ); + + /* Drop reference to network device */ + netdev_put ( netdev ); +} + +/* Broadcom NXE PCI driver */ +struct pci_driver bnxt_pci_driver __pci_driver = { + .ids = bnxt_nics, + .id_count = ARRAY_SIZE ( bnxt_nics ), + .probe = bnxt_init_one, + .remove = bnxt_remove_one, +}; diff --git a/src/drivers/net/bnxt/bnxt.h b/src/drivers/net/bnxt/bnxt.h new file mode 100644 index 000000000..2cbaec5e5 --- /dev/null +++ b/src/drivers/net/bnxt/bnxt.h @@ -0,0 +1,871 @@ +/* + * Copyright © 2018 Broadcom. All Rights Reserved. + * The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + + * This program is free software; you can redistribute it and/or modify it under + * the terms of version 2 of the GNU General Public License as published by the + * Free Software Foundation. + + * This program is distributed in the hope that it will be useful. 
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING + * ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR + * NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS + * ARE HELD TO BE LEGALLY INVALID. See the GNU General Public License for more + * details, a copy of which can be found in the file COPYING included with this + * package. + */ + +#undef ERRFILE +#define ERRFILE ERRFILE_tg3 + +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 +#define __be16 u16 +#define __be32 u32 +#define __be64 u64 + +#define dma_addr_t unsigned long + +union dma_addr64_t { + dma_addr_t addr; + u64 as_u64; +}; + +#include "bnxt_hsi.h" + +#define DRV_MODULE_NAME "bnxt" +#define IPXE_VERSION_MAJOR 1 +#define IPXE_VERSION_MINOR 0 +#define IPXE_VERSION_UPDATE 0 + +/* + * Broadcom ethernet driver defines. + */ +#define FLAG_SET(f, b) ((f) |= (b)) +#define FLAG_TEST(f, b) ((f) & (b)) +#define FLAG_RESET(f, b) ((f) &= ~(b)) +#define BNXT_FLAG_HWRM_SHORT_CMD_SUPP 0x0001 +#define BNXT_FLAG_HWRM_SHORT_CMD_REQ 0x0002 +#define BNXT_FLAG_RESOURCE_QCAPS_SUPPORT 0x0004 +#define BNXT_FLAG_MULTI_HOST 0x0008 +#define BNXT_FLAG_NPAR_MODE 0x0010 +#define BNXT_FLAG_ATOMICS_ENABLE 0x0020 +#define BNXT_FLAG_PCI_VF 0x0040 +/******************************************************************************* + * Status codes. + ******************************************************************************/ +#define STATUS_SUCCESS 0 +#define STATUS_FAILURE 1 +#define STATUS_NO_RESOURCE 2 +#define STATUS_INVALID_PARAMETER 3 +#define STATUS_LINK_ACTIVE 4 +#define STATUS_LINK_DOWN 5 +#define STATUS_LINK_SETTING_MISMATCH 6 +#define STATUS_TOO_MANY_FRAGMENTS 7 +#define STATUS_TRANSMIT_ABORTED 8 +#define STATUS_TRANSMIT_ERROR 9 +#define STATUS_RECEIVE_ABORTED 10 +#define STATUS_RECEIVE_ERROR 11 +#define STATUS_INVALID_PACKET_SIZE 12 +#define STATUS_NO_MAP_REGISTER 13 +#define STATUS_UNKNOWN_ADAPTER 14 +#define STATUS_NO_COALESCE_BUFFER 15 +#define STATUS_UNKNOWN_PHY 16 +#define STATUS_PENDING 17 +#define STATUS_NO_TX_DESC 18 +#define STATUS_NO_TX_BD 19 +#define STATUS_UNKNOWN_MEDIUM 20 +#define STATUS_RESOURCE 21 +#define STATUS_ABORT_REASON_DISCONNECT 22 +#define STATUS_ABORT_REASON_UPLOAD 23 +#define STATUS_TIMEOUT 0xffff +/******************************************************************************* + * Receive filter masks. + ******************************************************************************/ +#define RX_MASK_ACCEPT_NONE 0x0000 +#define RX_MASK_ACCEPT_UNICAST 0x0001 +#define RX_MASK_ACCEPT_MULTICAST 0x0002 +#define RX_MASK_ACCEPT_ALL_MULTICAST 0x0004 +#define RX_MASK_ACCEPT_BROADCAST 0x0008 +#define RX_MASK_ACCEPT_ERROR_PACKET 0x0010 +#define RX_MASK_PROMISCUOUS_MODE 0x10000 +/******************************************************************************* + * media speed. 
+ ******************************************************************************/ +#define MEDIUM_SPEED_AUTONEG 0x0000L +#define MEDIUM_SPEED_UNKNOWN 0x0000L +#define MEDIUM_SPEED_10MBPS 0x0100L +#define MEDIUM_SPEED_100MBPS 0x0200L +#define MEDIUM_SPEED_1000MBPS 0x0300L +#define MEDIUM_SPEED_2500MBPS 0x0400L +#define MEDIUM_SPEED_10GBPS 0x0600L +#define MEDIUM_SPEED_20GBPS 0x0700L +#define MEDIUM_SPEED_25GBPS 0x0800L +#define MEDIUM_SPEED_40GBPS 0x0900L +#define MEDIUM_SPEED_50GBPS 0x0a00L +#define MEDIUM_SPEED_100GBPS 0x0b00L +#define MEDIUM_SPEED_200GBPS 0x0c00L +#define MEDIUM_SPEED_AUTONEG_1G_FALLBACK 0x8000L /* Serdes */ +#define MEDIUM_SPEED_AUTONEG_2_5G_FALLBACK 0x8100L /* Serdes */ +#define MEDIUM_SPEED_HARDWARE_DEFAULT 0xff00L /* Serdes nvram def.*/ +#define MEDIUM_SPEED_MASK 0xff00L +#define GET_MEDIUM_SPEED(m) ((m) & MEDIUM_SPEED_MASK) +#define SET_MEDIUM_SPEED(bp, s) ((bp->medium & ~MEDIUM_SPEED_MASK) | s) +#define MEDIUM_UNKNOWN_DUPLEX 0x00000L +#define MEDIUM_FULL_DUPLEX 0x00000L +#define MEDIUM_HALF_DUPLEX 0x10000L +#define GET_MEDIUM_DUPLEX(m) ((m) & MEDIUM_HALF_DUPLEX) +#define SET_MEDIUM_DUPLEX(bp, d) ((bp->medium & ~MEDIUM_HALF_DUPLEX) | d) +#define MEDIUM_SELECTIVE_AUTONEG 0x01000000L +#define GET_MEDIUM_AUTONEG_MODE(m) ((m) & 0xff000000L) +#define PCICFG_ME_REGISTER 0x98 +#define GRC_COM_CHAN_BASE 0 +#define GRC_COM_CHAN_TRIG 0x100 +#define GRC_IND_BAR_0_ADDR 0x78 +#define GRC_IND_BAR_1_ADDR 0x7C +#define GRC_IND_BAR_0_DATA 0x80 +#define GRC_IND_BAR_1_DATA 0x84 +#define GRC_BASE_WIN_0 0x400 +#define GRC_DATA_WIN_0 0x1000 +#define HWRM_CMD_DEFAULT_TIMEOUT 500 /* in Milliseconds */ +#define HWRM_CMD_POLL_WAIT_TIME 100 /* In Microseconds */ +#define HWRM_CMD_DEFAULT_MULTIPLAYER(a) ((a) * 10) +#define HWRM_CMD_FLASH_MULTIPLAYER(a) ((a) * 100) +#define HWRM_CMD_FLASH_ERASE_MULTIPLAYER(a) ((a) * 1000) +#define HWRM_CMD_WAIT(b) ((bp->hwrm_cmd_timeout) * (b)) +#define MAX_ETHERNET_PACKET_BUFFER_SIZE 1536 +#define DEFAULT_NUMBER_OF_CMPL_RINGS 0x01 +#define DEFAULT_NUMBER_OF_TX_RINGS 0x01 +#define DEFAULT_NUMBER_OF_RX_RINGS 0x01 +#define DEFAULT_NUMBER_OF_RING_GRPS 0x01 +#define DEFAULT_NUMBER_OF_STAT_CTXS 0x01 +#define NUM_RX_BUFFERS 8 +#define MAX_RX_DESC_CNT 16 +#define MAX_TX_DESC_CNT 16 +#define MAX_CQ_DESC_CNT 64 +#define TX_RING_BUFFER_SIZE (MAX_TX_DESC_CNT * sizeof(struct tx_bd_short)) +#define RX_RING_BUFFER_SIZE \ + (MAX_RX_DESC_CNT * sizeof(struct rx_prod_pkt_bd)) +#define CQ_RING_BUFFER_SIZE (MAX_CQ_DESC_CNT * sizeof(struct cmpl_base)) +#define BNXT_DMA_ALIGNMENT 256 //64 +#define DMA_ALIGN_4K 4096 //thor tx & rx +#define REQ_BUFFER_SIZE 1024 +#define RESP_BUFFER_SIZE 1024 +#define DMA_BUFFER_SIZE 1024 +#define LM_PAGE_BITS(a) (a) +#define BNXT_RX_STD_DMA_SZ (1536 + 64 + 2) +#define NEXT_IDX(N, S) (((N) + 1) & ((S) - 1)) +#define BD_NOW(bd, entry, len) (&((u8 *)(bd))[(entry) * (len)]) +#define BNXT_CQ_INTR_MODE(vf) (\ + ((vf) ? RING_ALLOC_REQ_INT_MODE_MSIX : RING_ALLOC_REQ_INT_MODE_POLL)) +/* Set default link timeout period to 1 second */ +#define LINK_DEFAULT_TIMEOUT 1000 +#define LINK_POLL_WAIT_TIME 100 /* In Milliseconds */ +#define RX_MASK (\ + RX_MASK_ACCEPT_BROADCAST | \ + RX_MASK_ACCEPT_ALL_MULTICAST | \ + RX_MASK_ACCEPT_MULTICAST) +#define MAX_NQ_DESC_CNT 64 +#define NQ_RING_BUFFER_SIZE (MAX_NQ_DESC_CNT * sizeof(struct cmpl_base)) +#define TX_RING_QID (bp->thor ? (u16)bp->queue_id : ((u16)bp->port_idx * 10)) +#define RX_RING_QID (bp->thor ? bp->queue_id : 0) +#define STAT_CTX_ID ((bp->vf || bp->thor) ? 
bp->stat_ctx_id : 0) +#define TX_AVAIL(r) (r - 1) +#define TX_IN_USE(a, b, c) ((a - b) & (c - 1)) +#define NO_MORE_NQ_BD_TO_SERVICE 1 +#define SERVICE_NEXT_NQ_BD 0 +#define NO_MORE_CQ_BD_TO_SERVICE 1 +#define SERVICE_NEXT_CQ_BD 0 +#define MAC_HDR_SIZE 12 +#define VLAN_HDR_SIZE 4 +#define ETHERTYPE_VLAN 0x8100 +#define BYTE_SWAP_S(w) (\ + (((w) & 0xff00) >> 8) | \ + (((w) & 0x00ff) << 8)) +#define DB_OFFSET_PF 0x10000 +#define DB_OFFSET_VF 0x4000 +#define DBC_MSG_IDX(idx) (\ + ((idx) << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK) +#define DBC_MSG_XID(xid, flg) (\ + (((xid) << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | \ + DBC_DBC_PATH_L2 | (flg)) +#define PHY_STATUS 0x0001 +#define PHY_SPEED 0x0002 +#define DETECT_MEDIA 0x0004 +#define SUPPORT_SPEEDS 0x0008 +#define QCFG_PHY_ALL (\ + SUPPORT_SPEEDS | DETECT_MEDIA | PHY_SPEED | PHY_STATUS) +#define str_mbps "Mbps" +#define str_gbps "Gbps" +/* + * Broadcom ethernet driver nvm defines. + */ +/* nvm cfg 203 - u32 link_settings */ +#define LINK_SPEED_DRV_NUM 203 +#define LINK_SPEED_DRV_MASK 0x0000000F +#define LINK_SPEED_DRV_SHIFT 0 +#define LINK_SPEED_DRV_AUTONEG 0x0 +#define NS_LINK_SPEED_DRV_AUTONEG 0x0 +#define LINK_SPEED_DRV_1G 0x1 +#define NS_LINK_SPEED_DRV_1G 0x1 +#define LINK_SPEED_DRV_10G 0x2 +#define NS_LINK_SPEED_DRV_10G 0x2 +#define LINK_SPEED_DRV_25G 0x3 +#define NS_LINK_SPEED_DRV_25G 0x3 +#define LINK_SPEED_DRV_40G 0x4 +#define NS_LINK_SPEED_DRV_40G 0x4 +#define LINK_SPEED_DRV_50G 0x5 +#define NS_LINK_SPEED_DRV_50G 0x5 +#define LINK_SPEED_DRV_100G 0x6 +#define NS_LINK_SPEED_DRV_100G 0x6 +#define LINK_SPEED_DRV_200G 0x7 +#define NS_LINK_SPEED_DRV_200G 0x7 +#define LINK_SPEED_DRV_2_5G 0xE +#define NS_LINK_SPEED_DRV_2_5G 0xE +#define LINK_SPEED_DRV_100M 0xF +#define NS_LINK_SPEED_DRV_100M 0xF +/* nvm cfg 201 - u32 speed_cap_mask */ +#define SPEED_CAPABILITY_DRV_MASK 0x0000FFFF +#define SPEED_CAPABILITY_DRV_SHIFT 0 +#define SPEED_CAPABILITY_DRV_1G 0x1 +#define NS_SPEED_CAPABILITY_DRV_1G 0x1 +#define SPEED_CAPABILITY_DRV_10G 0x2 +#define NS_SPEED_CAPABILITY_DRV_10G 0x2 +#define SPEED_CAPABILITY_DRV_25G 0x4 +#define NS_SPEED_CAPABILITY_DRV_25G 0x4 +#define SPEED_CAPABILITY_DRV_40G 0x8 +#define NS_SPEED_CAPABILITY_DRV_40G 0x8 +#define SPEED_CAPABILITY_DRV_50G 0x10 +#define NS_SPEED_CAPABILITY_DRV_50G 0x10 +#define SPEED_CAPABILITY_DRV_100G 0x20 +#define NS_SPEED_CAPABILITY_DRV_100G 0x20 +#define SPEED_CAPABILITY_DRV_200G 0x40 +#define NS_SPEED_CAPABILITY_DRV_200G 0x40 +#define SPEED_CAPABILITY_DRV_2_5G 0x4000 +#define NS_SPEED_CAPABILITY_DRV_2_5G 0x4000 +#define SPEED_CAPABILITY_DRV_100M 0x8000 +#define NS_SPEED_CAPABILITY_DRV_100M 0x8000 +/* nvm cfg 202 */ +#define SPEED_CAPABILITY_FW_MASK 0xFFFF0000 +#define SPEED_CAPABILITY_FW_SHIFT 16 +#define SPEED_CAPABILITY_FW_1G (0x1L << 16) +#define NS_SPEED_CAPABILITY_FW_1G (0x1) +#define SPEED_CAPABILITY_FW_10G (0x2L << 16) +#define NS_SPEED_CAPABILITY_FW_10G (0x2) +#define SPEED_CAPABILITY_FW_25G (0x4L << 16) +#define NS_SPEED_CAPABILITY_FW_25G (0x4) +#define SPEED_CAPABILITY_FW_40G (0x8L << 16) +#define NS_SPEED_CAPABILITY_FW_40G (0x8) +#define SPEED_CAPABILITY_FW_50G (0x10L << 16) +#define NS_SPEED_CAPABILITY_FW_50G (0x10) +#define SPEED_CAPABILITY_FW_100G (0x20L << 16) +#define NS_SPEED_CAPABILITY_FW_100G (0x20) +#define SPEED_CAPABILITY_FW_200G (0x40L << 16) +#define NS_SPEED_CAPABILITY_FW_200G (0x40) +#define SPEED_CAPABILITY_FW_2_5G (0x4000L << 16) +#define NS_SPEED_CAPABILITY_FW_2_5G (0x4000) +#define SPEED_CAPABILITY_FW_100M (0x8000UL << 16) +#define NS_SPEED_CAPABILITY_FW_100M (0x8000) +/* 
nvm cfg 205 */ +#define LINK_SPEED_FW_NUM 205 +#define LINK_SPEED_FW_MASK 0x00000780 +#define LINK_SPEED_FW_SHIFT 7 +#define LINK_SPEED_FW_AUTONEG (0x0L << 7) +#define NS_LINK_SPEED_FW_AUTONEG (0x0) +#define LINK_SPEED_FW_1G (0x1L << 7) +#define NS_LINK_SPEED_FW_1G (0x1) +#define LINK_SPEED_FW_10G (0x2L << 7) +#define NS_LINK_SPEED_FW_10G (0x2) +#define LINK_SPEED_FW_25G (0x3L << 7) +#define NS_LINK_SPEED_FW_25G (0x3) +#define LINK_SPEED_FW_40G (0x4L << 7) +#define NS_LINK_SPEED_FW_40G (0x4) +#define LINK_SPEED_FW_50G (0x5L << 7) +#define NS_LINK_SPEED_FW_50G (0x5) +#define LINK_SPEED_FW_100G (0x6L << 7) +#define NS_LINK_SPEED_FW_100G (0x6) +#define LINK_SPEED_FW_200G (0x7L << 7) +#define NS_LINK_SPEED_FW_200G (0x7) +#define LINK_SPEED_FW_2_5G (0xEL << 7) +#define NS_LINK_SPEED_FW_2_5G (0xE) +#define LINK_SPEED_FW_100M (0xFL << 7) +#define NS_LINK_SPEED_FW_100M (0xF) +/* nvm cfg 210 */ +#define D3_LINK_SPEED_FW_NUM 210 +#define D3_LINK_SPEED_FW_MASK 0x000F0000 +#define D3_LINK_SPEED_FW_SHIFT 16 +#define D3_LINK_SPEED_FW_AUTONEG (0x0L << 16) +#define NS_D3_LINK_SPEED_FW_AUTONEG (0x0) +#define D3_LINK_SPEED_FW_1G (0x1L << 16) +#define NS_D3_LINK_SPEED_FW_1G (0x1) +#define D3_LINK_SPEED_FW_10G (0x2L << 16) +#define NS_D3_LINK_SPEED_FW_10G (0x2) +#define D3_LINK_SPEED_FW_25G (0x3L << 16) +#define NS_D3_LINK_SPEED_FW_25G (0x3) +#define D3_LINK_SPEED_FW_40G (0x4L << 16) +#define NS_D3_LINK_SPEED_FW_40G (0x4) +#define D3_LINK_SPEED_FW_50G (0x5L << 16) +#define NS_D3_LINK_SPEED_FW_50G (0x5) +#define D3_LINK_SPEED_FW_100G (0x6L << 16) +#define NS_D3_LINK_SPEED_FW_100G (0x6) +#define D3_LINK_SPEED_FW_200G (0x7L << 16) +#define NS_D3_LINK_SPEED_FW_200G (0x7) +#define D3_LINK_SPEED_FW_2_5G (0xEL << 16) +#define NS_D3_LINK_SPEED_FW_2_5G (0xE) +#define D3_LINK_SPEED_FW_100M (0xFL << 16) +#define NS_D3_LINK_SPEED_FW_100M (0xF) +/* nvm cfg 211 */ +#define D3_FLOW_CONTROL_FW_NUM 211 +#define D3_FLOW_CONTROL_FW_MASK 0x00700000 +#define D3_FLOW_CONTROL_FW_SHIFT 20 +#define D3_FLOW_CONTROL_FW_AUTO (0x0L << 20) +#define NS_D3_FLOW_CONTROL_FW_AUTO (0x0) +#define D3_FLOW_CONTROL_FW_TX (0x1L << 20) +#define NS_D3_FLOW_CONTROL_FW_TX (0x1) +#define D3_FLOW_CONTROL_FW_RX (0x2L << 20) +#define NS_D3_FLOW_CONTROL_FW_RX (0x2) +#define D3_FLOW_CONTROL_FW_BOTH (0x3L << 20) +#define NS_D3_FLOW_CONTROL_FW_BOTH (0x3) +#define D3_FLOW_CONTROL_FW_NONE (0x4L << 20) +#define NS_D3_FLOW_CONTROL_FW_NONE (0x4) +/* nvm cfg 213 */ +#define PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_NUM 213 +#define PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_MASK 0x02000000 +#define PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_SHIFT 25 +#define PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_DISABLED (0x0L << 25) +#define NS_PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_DISABLED (0x0) +#define PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_ENABLED (0x1L << 25) +#define NS_PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_ENABLED (0x1) +/* nvm cfg 357 - u32 mba_cfg2 */ +#define FUNC_CFG_PRE_BOOT_MBA_VLAN_VALUE_NUM 357 +#define FUNC_CFG_PRE_BOOT_MBA_VLAN_VALUE_MASK 0x0000FFFF +#define FUNC_CFG_PRE_BOOT_MBA_VLAN_VALUE_SHIFT 0 +/* nvm cfg 358 - u32 mba_cfg2 */ +#define FUNC_CFG_PRE_BOOT_MBA_VLAN_NUM 358 +#define FUNC_CFG_PRE_BOOT_MBA_VLAN_MASK 0x00010000 +#define FUNC_CFG_PRE_BOOT_MBA_VLAN_SHIFT 16 +#define FUNC_CFG_PRE_BOOT_MBA_VLAN_DISABLED (0x0L << 16) +#define NS_FUNC_CFG_PRE_BOOT_MBA_VLAN_DISABLED (0x0) +#define FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED (0x1L << 16) +#define NS_FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED (0x1) + +struct tx_doorbell { + u32 key_idx; +#define TX_DOORBELL_IDX_MASK 
0xffffffUL +#define TX_DOORBELL_IDX_SFT 0 +#define TX_DOORBELL_KEY_MASK 0xf0000000UL +#define TX_DOORBELL_KEY_SFT 28 + #define TX_DOORBELL_KEY_TX (0x0UL << 28) + #define TX_DOORBELL_KEY_LAST TX_DOORBELL_KEY_TX +}; + +struct rx_doorbell { + u32 key_idx; +#define RX_DOORBELL_IDX_MASK 0xffffffUL +#define RX_DOORBELL_IDX_SFT 0 +#define RX_DOORBELL_KEY_MASK 0xf0000000UL +#define RX_DOORBELL_KEY_SFT 28 + #define RX_DOORBELL_KEY_RX (0x1UL << 28) + #define RX_DOORBELL_KEY_LAST RX_DOORBELL_KEY_RX +}; + +struct cmpl_doorbell { + u32 key_mask_valid_idx; +#define CMPL_DOORBELL_IDX_MASK 0xffffffUL +#define CMPL_DOORBELL_IDX_SFT 0 +#define CMPL_DOORBELL_IDX_VALID 0x4000000UL +#define CMPL_DOORBELL_MASK 0x8000000UL +#define CMPL_DOORBELL_KEY_MASK 0xf0000000UL +#define CMPL_DOORBELL_KEY_SFT 28 + #define CMPL_DOORBELL_KEY_CMPL (0x2UL << 28) + #define CMPL_DOORBELL_KEY_LAST CMPL_DOORBELL_KEY_CMPL +}; + +/* dbc_dbc (size:64b/8B) */ +struct dbc_dbc { + __le32 index; + #define DBC_DBC_INDEX_MASK 0xffffffUL + #define DBC_DBC_INDEX_SFT 0 + __le32 type_path_xid; + #define DBC_DBC_XID_MASK 0xfffffUL + #define DBC_DBC_XID_SFT 0 + #define DBC_DBC_PATH_MASK 0x3000000UL + #define DBC_DBC_PATH_SFT 24 + #define DBC_DBC_PATH_ROCE (0x0UL << 24) + #define DBC_DBC_PATH_L2 (0x1UL << 24) + #define DBC_DBC_PATH_ENGINE (0x2UL << 24) + #define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE + #define DBC_DBC_DEBUG_TRACE 0x8000000UL + #define DBC_DBC_TYPE_MASK 0xf0000000UL + #define DBC_DBC_TYPE_SFT 28 + #define DBC_DBC_TYPE_SQ (0x0UL << 28) + #define DBC_DBC_TYPE_RQ (0x1UL << 28) + #define DBC_DBC_TYPE_SRQ (0x2UL << 28) + #define DBC_DBC_TYPE_SRQ_ARM (0x3UL << 28) + #define DBC_DBC_TYPE_CQ (0x4UL << 28) + #define DBC_DBC_TYPE_CQ_ARMSE (0x5UL << 28) + #define DBC_DBC_TYPE_CQ_ARMALL (0x6UL << 28) + #define DBC_DBC_TYPE_CQ_ARMENA (0x7UL << 28) + #define DBC_DBC_TYPE_SRQ_ARMENA (0x8UL << 28) + #define DBC_DBC_TYPE_CQ_CUTOFF_ACK (0x9UL << 28) + #define DBC_DBC_TYPE_NQ (0xaUL << 28) + #define DBC_DBC_TYPE_NQ_ARM (0xbUL << 28) + #define DBC_DBC_TYPE_NULL (0xfUL << 28) + #define DBC_DBC_TYPE_LAST DBC_DBC_TYPE_NULL +}; + +/******************************************************************************* + * Transmit info. 
+ *****************************************************************************/ +struct tx_bd_short { + u16 flags_type; +#define TX_BD_SHORT_TYPE_MASK 0x3fUL +#define TX_BD_SHORT_TYPE_SFT 0 +#define TX_BD_SHORT_TYPE_TX_BD_SHORT 0x0UL +#define TX_BD_SHORT_TYPE_LAST TX_BD_SHORT_TYPE_TX_BD_SHORT +#define TX_BD_SHORT_FLAGS_MASK 0xffc0UL +#define TX_BD_SHORT_FLAGS_SFT 6 +#define TX_BD_SHORT_FLAGS_PACKET_END 0x40UL +#define TX_BD_SHORT_FLAGS_NO_CMPL 0x80UL +#define TX_BD_SHORT_FLAGS_BD_CNT_MASK 0x1f00UL +#define TX_BD_SHORT_FLAGS_BD_CNT_SFT 8 +#define TX_BD_SHORT_FLAGS_LHINT_MASK 0x6000UL +#define TX_BD_SHORT_FLAGS_LHINT_SFT 13 +#define TX_BD_SHORT_FLAGS_LHINT_LT512 (0x0UL << 13) +#define TX_BD_SHORT_FLAGS_LHINT_LT1K (0x1UL << 13) +#define TX_BD_SHORT_FLAGS_LHINT_LT2K (0x2UL << 13) +#define TX_BD_SHORT_FLAGS_LHINT_GTE2K (0x3UL << 13) +#define TX_BD_SHORT_FLAGS_LHINT_LAST TX_BD_SHORT_FLAGS_LHINT_GTE2K +#define TX_BD_SHORT_FLAGS_COAL_NOW 0x8000UL + u16 len; + u32 opaque; + union dma_addr64_t dma; +}; + +struct tx_cmpl { + u16 flags_type; +#define TX_CMPL_TYPE_MASK 0x3fUL +#define TX_CMPL_TYPE_SFT 0 +#define TX_CMPL_TYPE_TX_L2 0x0UL +#define TX_CMPL_TYPE_LAST TX_CMPL_TYPE_TX_L2 +#define TX_CMPL_FLAGS_MASK 0xffc0UL +#define TX_CMPL_FLAGS_SFT 6 +#define TX_CMPL_FLAGS_ERROR 0x40UL +#define TX_CMPL_FLAGS_PUSH 0x80UL + u16 unused_0; + u32 opaque; + u16 errors_v; +#define TX_CMPL_V 0x1UL +#define TX_CMPL_ERRORS_MASK 0xfffeUL +#define TX_CMPL_ERRORS_SFT 1 +#define TX_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL +#define TX_CMPL_ERRORS_BUFFER_ERROR_SFT 1 +#define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (0x0UL << 1) +#define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (0x2UL << 1) +#define TX_CMPL_ERRORS_BUFFER_ERROR_LAST TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT +#define TX_CMPL_ERRORS_ZERO_LENGTH_PKT 0x10UL +#define TX_CMPL_ERRORS_EXCESSIVE_BD_LENGTH 0x20UL +#define TX_CMPL_ERRORS_DMA_ERROR 0x40UL +#define TX_CMPL_ERRORS_HINT_TOO_SHORT 0x80UL +#define TX_CMPL_ERRORS_POISON_TLP_ERROR 0x100UL + u16 unused_1; + u32 unused_2; +}; + +struct tx_info { + void *bd_virt; + struct io_buffer *iob[MAX_TX_DESC_CNT]; + u16 prod_id; /* Tx producer index. */ + u16 cons_id; + u16 ring_cnt; + u32 cnt; /* Tx statistics. */ + u32 cnt_req; +}; + +struct cmpl_base { + u16 type; +#define CMPL_BASE_TYPE_MASK 0x3fUL +#define CMPL_BASE_TYPE_SFT 0 +#define CMPL_BASE_TYPE_TX_L2 0x0UL +#define CMPL_BASE_TYPE_RX_L2 0x11UL +#define CMPL_BASE_TYPE_RX_AGG 0x12UL +#define CMPL_BASE_TYPE_RX_TPA_START 0x13UL +#define CMPL_BASE_TYPE_RX_TPA_END 0x15UL +#define CMPL_BASE_TYPE_STAT_EJECT 0x1aUL +#define CMPL_BASE_TYPE_HWRM_DONE 0x20UL +#define CMPL_BASE_TYPE_HWRM_FWD_REQ 0x22UL +#define CMPL_BASE_TYPE_HWRM_FWD_RESP 0x24UL +#define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL +#define CMPL_BASE_TYPE_CQ_NOTIFICATION 0x30UL +#define CMPL_BASE_TYPE_SRQ_EVENT 0x32UL +#define CMPL_BASE_TYPE_DBQ_EVENT 0x34UL +#define CMPL_BASE_TYPE_QP_EVENT 0x38UL +#define CMPL_BASE_TYPE_FUNC_EVENT 0x3aUL +#define CMPL_BASE_TYPE_LAST CMPL_BASE_TYPE_FUNC_EVENT + u16 info1; + u32 info2; + u32 info3_v; +#define CMPL_BASE_V 0x1UL +#define CMPL_BASE_INFO3_MASK 0xfffffffeUL +#define CMPL_BASE_INFO3_SFT 1 + u32 info4; +}; + +struct cmp_info { + void *bd_virt; + u16 cons_id; + u16 ring_cnt; + u8 completion_bit; + u8 res[3]; +}; + +/* Completion Queue Notification */ +/* nq_cn (size:128b/16B) */ +struct nq_base { + u16 type; +/* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. 
Odd values indicate 32B + * records. + */ +#define NQ_CN_TYPE_MASK 0x3fUL +#define NQ_CN_TYPE_SFT 0 +/* CQ Notification */ + #define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL + #define NQ_CN_TYPE_LAST NQ_CN_TYPE_CQ_NOTIFICATION + u16 reserved16; +/* + * This is an application level ID used to identify the + * CQ. This field carries the lower 32b of the value. + */ + u32 cq_handle_low; + u32 v; +/* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ +#define NQ_CN_V 0x1UL +/* + * This is an application level ID used to identify the + * CQ. This field carries the upper 32b of the value. + */ + u32 cq_handle_high; +}; + +struct nq_info { + void *bd_virt; + u16 cons_id; + u16 ring_cnt; + u8 completion_bit; + u8 res[3]; +}; + +struct rx_pkt_cmpl { + u16 flags_type; +#define RX_PKT_CMPL_TYPE_MASK 0x3fUL +#define RX_PKT_CMPL_TYPE_SFT 0 +#define RX_PKT_CMPL_TYPE_RX_L2 0x11UL +#define RX_PKT_CMPL_TYPE_LAST RX_PKT_CMPL_TYPE_RX_L2 +#define RX_PKT_CMPL_FLAGS_MASK 0xffc0UL +#define RX_PKT_CMPL_FLAGS_SFT 6 +#define RX_PKT_CMPL_FLAGS_ERROR 0x40UL +#define RX_PKT_CMPL_FLAGS_PLACEMENT_MASK 0x380UL +#define RX_PKT_CMPL_FLAGS_PLACEMENT_SFT 7 +#define RX_PKT_CMPL_FLAGS_PLACEMENT_NORMAL (0x0UL << 7) +#define RX_PKT_CMPL_FLAGS_PLACEMENT_JUMBO (0x1UL << 7) +#define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (0x2UL << 7) +#define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST RX_PKT_CMPL_FLAGS_PLACEMENT_HDS +#define RX_PKT_CMPL_FLAGS_RSS_VALID 0x400UL +#define RX_PKT_CMPL_FLAGS_UNUSED 0x800UL +#define RX_PKT_CMPL_FLAGS_ITYPE_MASK 0xf000UL +#define RX_PKT_CMPL_FLAGS_ITYPE_SFT 12 +#define RX_PKT_CMPL_FLAGS_ITYPE_NOT_KNOWN (0x0UL << 12) +#define RX_PKT_CMPL_FLAGS_ITYPE_IP (0x1UL << 12) +#define RX_PKT_CMPL_FLAGS_ITYPE_TCP (0x2UL << 12) +#define RX_PKT_CMPL_FLAGS_ITYPE_UDP (0x3UL << 12) +#define RX_PKT_CMPL_FLAGS_ITYPE_FCOE (0x4UL << 12) +#define RX_PKT_CMPL_FLAGS_ITYPE_ROCE (0x5UL << 12) +#define RX_PKT_CMPL_FLAGS_ITYPE_ICMP (0x7UL << 12) +#define RX_PKT_CMPL_FLAGS_ITYPE_PTP_WO_TIMESTAMP (0x8UL << 12) +#define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP (0x9UL << 12) +#define RX_PKT_CMPL_FLAGS_ITYPE_LAST RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP + u16 len; + u32 opaque; + u8 agg_bufs_v1; +#define RX_PKT_CMPL_V1 0x1UL +#define RX_PKT_CMPL_AGG_BUFS_MASK 0x3eUL +#define RX_PKT_CMPL_AGG_BUFS_SFT 1 +#define RX_PKT_CMPL_UNUSED1_MASK 0xc0UL +#define RX_PKT_CMPL_UNUSED1_SFT 6 + u8 rss_hash_type; + u8 payload_offset; + u8 unused1; + u32 rss_hash; +}; + +struct rx_pkt_cmpl_hi { + u32 flags2; +#define RX_PKT_CMPL_FLAGS2_IP_CS_CALC 0x1UL +#define RX_PKT_CMPL_FLAGS2_L4_CS_CALC 0x2UL +#define RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC 0x4UL +#define RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC 0x8UL +#define RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK 0xf0UL +#define RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT 4 +#define RX_PKT_CMPL_FLAGS2_META_FORMAT_NONE (0x0UL << 4) +#define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN (0x1UL << 4) +#define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \ + RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN +#define RX_PKT_CMPL_FLAGS2_IP_TYPE 0x100UL + u32 metadata; +#define RX_PKT_CMPL_METADATA_VID_MASK 0xfffUL +#define RX_PKT_CMPL_METADATA_VID_SFT 0 +#define RX_PKT_CMPL_METADATA_DE 0x1000UL +#define RX_PKT_CMPL_METADATA_PRI_MASK 0xe000UL +#define RX_PKT_CMPL_METADATA_PRI_SFT 13 +#define RX_PKT_CMPL_METADATA_TPID_MASK 0xffff0000UL +#define RX_PKT_CMPL_METADATA_TPID_SFT 16 + u16 errors_v2; +#define RX_PKT_CMPL_V2 0x1UL +#define RX_PKT_CMPL_ERRORS_MASK 0xfffeUL +#define 
RX_PKT_CMPL_ERRORS_SFT 1 +#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL +#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT 1 +#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1) +#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1) +#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2UL << 1) +#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1) +#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \ + RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT +#define RX_PKT_CMPL_ERRORS_IP_CS_ERROR 0x10UL +#define RX_PKT_CMPL_ERRORS_L4_CS_ERROR 0x20UL +#define RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR 0x40UL +#define RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR 0x80UL +#define RX_PKT_CMPL_ERRORS_CRC_ERROR 0x100UL +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_MASK 0xe00UL +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_SFT 9 +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (0x0UL << 9) +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1UL << 9) +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2UL << 9) +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3UL << 9) +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4UL << 9) +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5UL << 9) +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6UL << 9) +#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \ + RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_MASK 0xf000UL +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_SFT 12 +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_NO_ERROR (0x0UL << 12) +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1UL << 12) +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2UL << 12) +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3UL << 12) +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4UL << 12) +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5UL << 12) +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6UL << 12) +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7UL << 12) +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8UL << 12) +#define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \ + RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN + u16 cfa_code; + u32 reorder; +#define RX_PKT_CMPL_REORDER_MASK 0xffffffUL +#define RX_PKT_CMPL_REORDER_SFT 0 +}; + +struct rx_prod_pkt_bd { + u16 flags_type; +#define RX_PROD_PKT_BD_TYPE_MASK 0x3fUL +#define RX_PROD_PKT_BD_TYPE_SFT 0 +#define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT 0x4UL +#define RX_PROD_PKT_BD_TYPE_LAST RX_PROD_PKT_BD_TYPE_RX_PROD_PKT +#define RX_PROD_PKT_BD_FLAGS_MASK 0xffc0UL +#define RX_PROD_PKT_BD_FLAGS_SFT 6 +#define RX_PROD_PKT_BD_FLAGS_SOP_PAD 0x40UL +#define RX_PROD_PKT_BD_FLAGS_EOP_PAD 0x80UL +#define RX_PROD_PKT_BD_FLAGS_BUFFERS_MASK 0x300UL +#define RX_PROD_PKT_BD_FLAGS_BUFFERS_SFT 8 + u16 len; + u32 opaque; + union dma_addr64_t dma; +}; + +struct rx_info { + void *bd_virt; + struct io_buffer *iob[NUM_RX_BUFFERS]; + u16 iob_cnt; + u16 buf_cnt; /* Total Rx buffer descriptors. */ + u16 ring_cnt; + u16 cons_id; /* Last processed consumer index. */ +/* Receive statistics. 
*/ + u32 cnt; + u32 good; + u32 drop_err; + u32 drop_lb; + u32 drop_vlan; +}; + +#define VALID_DRIVER_REG 0x0001 +#define VALID_STAT_CTX 0x0002 +#define VALID_RING_CQ 0x0004 +#define VALID_RING_TX 0x0008 +#define VALID_RING_RX 0x0010 +#define VALID_RING_GRP 0x0020 +#define VALID_VNIC_ID 0x0040 +#define VALID_RX_IOB 0x0080 +#define VALID_L2_FILTER 0x0100 +#define VALID_RING_NQ 0x0200 + +struct bnxt { +/* begin "general, frequently-used members" cacheline section */ +/* If the IRQ handler (which runs lockless) needs to be + * quiesced, the following bitmask state is used. The + * SYNC flag is set by non-IRQ context code to initiate + * the quiescence. + * + * When the IRQ handler notices that SYNC is set, it + * disables interrupts and returns. + * + * When all outstanding IRQ handlers have returned after + * the SYNC flag has been set, the setter can be assured + * that interrupts will no longer get run. + * + * In this way all SMP driver locks are never acquired + * in hw IRQ context, only sw IRQ context or lower. + */ + unsigned int irq_sync; + struct net_device *dev; + struct pci_device *pdev; + void *hwrm_addr_req; + void *hwrm_addr_resp; + void *hwrm_addr_dma; + dma_addr_t req_addr_mapping; + dma_addr_t resp_addr_mapping; + dma_addr_t dma_addr_mapping; + struct tx_info tx; /* Tx info. */ + struct rx_info rx; /* Rx info. */ + struct cmp_info cq; /* completion info. */ + struct nq_info nq; /* notification queue info. */ + u16 nq_ring_id; + u8 queue_id; + u8 thor; + u16 last_resp_code; + u16 seq_id; + u32 flag_hwrm; + u32 flags; +/* PCI info. */ + u16 subsystem_vendor; + u16 subsystem_device; + u16 cmd_reg; + u8 pf_num; /* absolute PF number */ + u8 vf; + void *bar0; + void *bar1; + void *bar2; +/* Device info. */ + u16 chip_num; +/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ + u32 chip_id; + u32 hwrm_cmd_timeout; + u16 hwrm_spec_code; + u16 hwrm_max_req_len; + u16 hwrm_max_ext_req_len; + u8 mac_addr[ETH_ALEN]; /* HW MAC address */ + u16 fid; + u8 port_idx; + u8 ordinal_value; + u16 mtu; + u16 ring_grp_id; + u16 cq_ring_id; + u16 tx_ring_id; + u16 rx_ring_id; + u16 current_link_speed; + u16 link_status; + u16 wait_link_timeout; + u64 l2_filter_id; + u16 vnic_id; + u16 stat_ctx_id; + u16 vlan_id; + u16 vlan_tx; + u32 mba_cfg2; + u32 medium; + u16 support_speeds; + u32 link_set; + u8 media_detect; + u8 rsvd; + u16 max_vfs; + u16 vf_res_strategy; + u16 min_vnics; + u16 max_vnics; + u16 max_msix; + u16 min_hw_ring_grps; + u16 max_hw_ring_grps; + u16 min_tx_rings; + u16 max_tx_rings; + u16 min_rx_rings; + u16 max_rx_rings; + u16 min_cp_rings; + u16 max_cp_rings; + u16 min_rsscos_ctxs; + u16 max_rsscos_ctxs; + u16 min_l2_ctxs; + u16 max_l2_ctxs; + u16 min_stat_ctxs; + u16 max_stat_ctxs; + u16 num_cmpl_rings; + u16 num_tx_rings; + u16 num_rx_rings; + u16 num_stat_ctxs; + u16 num_hw_ring_grps; +}; + +/* defines required to resolve checkpatch errors / warnings */ +#define test_if if +#define write32 writel +#define write64 writeq +#define pci_read_byte pci_read_config_byte +#define pci_read_word16 pci_read_config_word +#define pci_write_word pci_write_config_word +#define SHORT_CMD_SUPPORTED VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED +#define SHORT_CMD_REQUIRED VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED +#define CQ_DOORBELL_KEY_MASK(a) (\ + CMPL_DOORBELL_KEY_CMPL | \ + CMPL_DOORBELL_IDX_VALID | \ + CMPL_DOORBELL_MASK | \ + (u32)(a)) +#define CQ_DOORBELL_KEY_IDX(a) (\ + CMPL_DOORBELL_KEY_CMPL | \ + CMPL_DOORBELL_IDX_VALID | \ + (u32)(a)) +#define TX_BD_FLAGS (\ + TX_BD_SHORT_TYPE_TX_BD_SHORT 
|\ + TX_BD_SHORT_FLAGS_COAL_NOW |\ + TX_BD_SHORT_FLAGS_PACKET_END |\ + (1 << TX_BD_SHORT_FLAGS_BD_CNT_SFT)) +#define PORT_PHY_FLAGS (\ + BNXT_FLAG_NPAR_MODE | \ + BNXT_FLAG_MULTI_HOST) +#define RING_FREE(bp, rid, flag) bnxt_hwrm_ring_free(bp, rid, flag) +#define SET_LINK(p, m, s) ((p & (m >> s)) << s) +#define SET_MBA(p, m, s) ((p & (m >> s)) << s) +#define SPEED_DRV_MASK LINK_SPEED_DRV_MASK +#define SPEED_DRV_SHIFT LINK_SPEED_DRV_SHIFT +#define SPEED_FW_MASK LINK_SPEED_FW_MASK +#define SPEED_FW_SHIFT LINK_SPEED_FW_SHIFT +#define D3_SPEED_FW_MASK D3_LINK_SPEED_FW_MASK +#define D3_SPEED_FW_SHIFT D3_LINK_SPEED_FW_SHIFT +#define MEDIA_AUTO_DETECT_MASK PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_MASK +#define MEDIA_AUTO_DETECT_SHIFT PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_SHIFT +#define VLAN_MASK FUNC_CFG_PRE_BOOT_MBA_VLAN_MASK +#define VLAN_SHIFT FUNC_CFG_PRE_BOOT_MBA_VLAN_SHIFT +#define VLAN_VALUE_MASK FUNC_CFG_PRE_BOOT_MBA_VLAN_VALUE_MASK +#define VLAN_VALUE_SHIFT FUNC_CFG_PRE_BOOT_MBA_VLAN_VALUE_SHIFT +#define VF_CFG_ENABLE_FLAGS (\ + FUNC_VF_CFG_REQ_ENABLES_MTU | \ + FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN | \ + FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR | \ + FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR) + +#define CHIP_NUM_57500 0x1750 diff --git a/src/drivers/net/bnxt/bnxt_dbg.h b/src/drivers/net/bnxt/bnxt_dbg.h new file mode 100644 index 000000000..188978ad6 --- /dev/null +++ b/src/drivers/net/bnxt/bnxt_dbg.h @@ -0,0 +1,677 @@ +/* + * Copyright © 2018 Broadcom. All Rights Reserved. + * The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + + * This program is free software; you can redistribute it and/or modify it under + * the terms of version 2 of the GNU General Public License as published by the + * Free Software Foundation. + + * This program is distributed in the hope that it will be useful. + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING + * ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR + * NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS + * ARE HELD TO BE LEGALLY INVALID. See the GNU General Public License for more + * details, a copy of which can be found in the file COPYING included with this + * package. 
+ */ + +//#define DEBUG_DRV +//#define DEBUG_KEY +//#define DEBUG_PCI +//#define DEBUG_MEMORY +//#define DEBUG_LINK +//#define DEBUG_CHIP +//#define DEBUG_FAIL +//#define DEBUG_HWRM_CMDS +//#define DEBUG_HWRM_DUMP +//#define DEBUG_CQ +//#define DEBUG_CQ_DUMP +//#define DEBUG_TX +//#define DEBUG_TX_DUMP +//#define DEBUG_RX +//#define DEBUG_RX_DUMP + +#if \ + defined(DEBUG_DRV) || \ + defined(DEBUG_PCI) || \ + defined(DEBUG_CHIP) || \ + defined(DEBUG_MEMORY) || \ + defined(DEBUG_LINK) || \ + defined(DEBUG_FAIL) || \ + defined(DEBUG_HWRM_CMDS) || \ + defined(DEBUG_HWRM_DUMP) || \ + defined(DEBUG_CQ) || \ + defined(DEBUG_CQ_DUMP) || \ + defined(DEBUG_TX) || \ + defined(DEBUG_TX_DUMP) || \ + defined(DEBUG_RX) || \ + defined(DEBUG_RX_DUMP) +#define DEBUG_DEFAULT +#endif +#if defined(DEBUG_DEFAULT) +#define dbg_prn printf + +void pause_drv(void) +{ +#if defined(DEBUG_KEY) + dbg_prn(" Press a key..."); + getchar(); +#endif + dbg_prn("\n"); +} + +#define MAX_CHAR_SIZE(a) (u32)((1 << (a)) - 1) +#define DISP_U8 0x00 +#define DISP_U16 0x01 +#define DISP_U32 0x02 +#define DISP_U64 0x03 + +void dumpmemory1(u8 *buffer, u32 length, u8 flag) +{ + u32 jj = 0; + u8 i, c; + + dbg_prn("\n %p:", buffer); + for (jj = 0; jj < 16; jj++) { + if (!(jj & MAX_CHAR_SIZE(flag))) + dbg_prn(" "); + if (jj < length) + dbg_prn("%02x", buffer[jj]); + else + dbg_prn(" "); + if ((jj & 0xF) == 0xF) { + dbg_prn(" "); + for (i = 0; i < 16; i++) { + if (i < length) { + c = buffer[jj + i - 15]; + if (c >= 0x20 && c < 0x7F) + ; + else + c = '.'; + dbg_prn("%c", c); + } + } + } + } +} + +void dump_mem(u8 *buffer, u32 length, u8 flag) +{ + u32 length16, remlen, jj; + + length16 = length & 0xFFFFFFF0; + remlen = length & 0xF; + for (jj = 0; jj < length16; jj += 16) + dumpmemory1((u8 *)&buffer[jj], 16, flag); + if (remlen) + dumpmemory1((u8 *)&buffer[length16], remlen, flag); + if (length16 || remlen) + dbg_prn("\n"); +} +#else +#define dbg_prn(func) +#endif + +#if defined(DEBUG_PCI) +void dbg_pci(struct bnxt *bp, const char *func, u16 cmd_reg) +{ + struct pci_device *pdev = bp->pdev; + + dbg_prn("- %s()\n", func); + dbg_prn(" Bus:Dev:Func : %04X\n", pdev->busdevfn); + dbg_prn(" Vendor id : %04X\n", pdev->vendor); + dbg_prn(" Device id : %04X (%cF)\n", + pdev->device, (bp->vf) ? 
'V' : 'P'); + dbg_prn(" Irq : %d\n", pdev->irq); + dbg_prn(" PCI Command Reg : %04X\n", cmd_reg); + dbg_prn(" Sub Vendor id : %04X\n", bp->subsystem_vendor); + dbg_prn(" Sub Device id : %04X\n", bp->subsystem_device); + dbg_prn(" PF Number : %X\n", bp->pf_num); + dbg_prn(" BAR (0) : %p %lx\n", + bp->bar0, pci_bar_start(pdev, PCI_BASE_ADDRESS_0)); + dbg_prn(" BAR (1) : %p %lx\n", + bp->bar1, pci_bar_start(pdev, PCI_BASE_ADDRESS_2)); + dbg_prn(" BAR (2) : %p %lx\n", + bp->bar2, pci_bar_start(pdev, PCI_BASE_ADDRESS_4)); + dbg_prn(" "); + pause_drv(); +} +#else +#define dbg_pci(bp, func, creg) +#endif + +#if defined(DEBUG_MEMORY) +void dbg_mem(struct bnxt *bp, const char *func) +{ + dbg_prn("- %s()\n", func); + dbg_prn(" bp Addr : %p", bp); + dbg_prn(" Len %4d", (u16)sizeof(struct bnxt)); + dbg_prn(" phy %lx\n", virt_to_bus(bp)); + dbg_prn(" bp->hwrm_req_addr : %p", bp->hwrm_addr_req); + dbg_prn(" Len %4d", (u16)REQ_BUFFER_SIZE); + dbg_prn(" phy %lx\n", bp->req_addr_mapping); + dbg_prn(" bp->hwrm_resp_addr : %p", bp->hwrm_addr_resp); + dbg_prn(" Len %4d", (u16)RESP_BUFFER_SIZE); + dbg_prn(" phy %lx\n", bp->resp_addr_mapping); + dbg_prn(" bp->dma_addr : %p", bp->hwrm_addr_dma); + dbg_prn(" Len %4d", (u16)DMA_BUFFER_SIZE); + dbg_prn(" phy %lx\n", bp->dma_addr_mapping); + dbg_prn(" bp->tx.bd_virt : %p", bp->tx.bd_virt); + dbg_prn(" Len %4d", (u16)TX_RING_BUFFER_SIZE); + dbg_prn(" phy %lx\n", virt_to_bus(bp->tx.bd_virt)); + dbg_prn(" bp->rx.bd_virt : %p", bp->rx.bd_virt); + dbg_prn(" Len %4d", (u16)RX_RING_BUFFER_SIZE); + dbg_prn(" phy %lx\n", virt_to_bus(bp->rx.bd_virt)); + dbg_prn(" bp->cq.bd_virt : %p", bp->cq.bd_virt); + dbg_prn(" Len %4d", (u16)CQ_RING_BUFFER_SIZE); + dbg_prn(" phy %lx\n", virt_to_bus(bp->cq.bd_virt)); + dbg_prn(" bp->nq.bd_virt : %p", bp->nq.bd_virt); + dbg_prn(" Len %4d", (u16)NQ_RING_BUFFER_SIZE); + dbg_prn(" phy %lx\n", virt_to_bus(bp->nq.bd_virt)); + dbg_prn(" "); + pause_drv(); +} +#else +#define dbg_mem(bp, func) (func = func) +#endif + +#if defined(DEBUG_CHIP) +void dbg_fw_ver(struct hwrm_ver_get_output *resp, u32 tmo) +{ + if (resp->hwrm_intf_maj_8b < 1) { + dbg_prn(" HWRM interface %d.%d.%d is older than 1.0.0.\n", + resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, + resp->hwrm_intf_upd_8b); + dbg_prn(" Update FW with HWRM interface 1.0.0 or newer.\n"); + } + dbg_prn(" FW Version : %d.%d.%d.%d\n", + resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b, + resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b); + dbg_prn(" cmd timeout : %d\n", tmo); + if (resp->hwrm_intf_maj_8b >= 1) + dbg_prn(" hwrm_max_req_len : %d\n", resp->max_req_win_len); + dbg_prn(" hwrm_max_ext_req : %d\n", resp->max_ext_req_len); + dbg_prn(" chip_num : %x\n", resp->chip_num); + dbg_prn(" chip_id : %x\n", + (u32)(resp->chip_rev << 24) | + (u32)(resp->chip_metal << 16) | + (u32)(resp->chip_bond_id << 8) | + (u32)resp->chip_platform_type); + test_if((resp->dev_caps_cfg & SHORT_CMD_SUPPORTED) && + (resp->dev_caps_cfg & SHORT_CMD_REQUIRED)) + dbg_prn(" SHORT_CMD_SUPPORTED\n"); +} + +void dbg_func_resource_qcaps(struct bnxt *bp) +{ +// Ring Groups + dbg_prn(" min_hw_ring_grps : %d\n", bp->min_hw_ring_grps); + dbg_prn(" max_hw_ring_grps : %d\n", bp->max_hw_ring_grps); +// TX Rings + dbg_prn(" min_tx_rings : %d\n", bp->min_tx_rings); + dbg_prn(" max_tx_rings : %d\n", bp->max_tx_rings); +// RX Rings + dbg_prn(" min_rx_rings : %d\n", bp->min_rx_rings); + dbg_prn(" max_rx_rings : %d\n", bp->max_rx_rings); +// Completion Rings + dbg_prn(" min_cq_rings : %d\n", bp->min_cp_rings); + dbg_prn(" max_cq_rings : %d\n", 
bp->max_cp_rings); +// Statistic Contexts + dbg_prn(" min_stat_ctxs : %d\n", bp->min_stat_ctxs); + dbg_prn(" max_stat_ctxs : %d\n", bp->max_stat_ctxs); +} + +void dbg_func_qcaps(struct bnxt *bp) +{ + dbg_prn(" Port Number : %d\n", bp->port_idx); + dbg_prn(" fid : 0x%04x\n", bp->fid); + dbg_prn(" PF MAC : %02x:%02x:%02x:%02x:%02x:%02x\n", + bp->mac_addr[0], + bp->mac_addr[1], + bp->mac_addr[2], + bp->mac_addr[3], + bp->mac_addr[4], + bp->mac_addr[5]); +} + +void dbg_func_qcfg(struct bnxt *bp) +{ + dbg_prn(" ordinal_value : %d\n", bp->ordinal_value); + dbg_prn(" stat_ctx_id : %x\n", bp->stat_ctx_id); + if (bp->vf) { + dbg_func_qcaps(bp); + dbg_prn(" vlan_id : %d\n", bp->vlan_id); + } +} + +void prn_set_speed(u32 speed) +{ + u32 speed1 = ((speed & LINK_SPEED_DRV_MASK) >> LINK_SPEED_DRV_SHIFT); + + dbg_prn(" Set Link Speed : "); + switch (speed & LINK_SPEED_DRV_MASK) { + case LINK_SPEED_DRV_1G: + dbg_prn("1 GBPS"); + break; + case LINK_SPEED_DRV_10G: + dbg_prn("10 GBPS"); + break; + case LINK_SPEED_DRV_25G: + dbg_prn("25 GBPS"); + break; + case LINK_SPEED_DRV_40G: + dbg_prn("40 GBPS"); + break; + case LINK_SPEED_DRV_50G: + dbg_prn("50 GBPS"); + break; + case LINK_SPEED_DRV_100G: + dbg_prn("100 GBPS"); + break; + case LINK_SPEED_DRV_200G: + dbg_prn("200 GBPS"); + break; + case LINK_SPEED_DRV_AUTONEG: + dbg_prn("AUTONEG"); + break; + default: + dbg_prn("%x", speed1); + break; + } + dbg_prn("\n"); +} + +void dbg_chip_info(struct bnxt *bp) +{ + if (bp->thor) + dbg_prn(" NQ Ring Id : %d\n", bp->nq_ring_id); + else + dbg_prn(" Grp ID : %d\n", bp->ring_grp_id); + dbg_prn(" Stat Ctx ID : %d\n", bp->stat_ctx_id); + dbg_prn(" CQ Ring Id : %d\n", bp->cq_ring_id); + dbg_prn(" Tx Ring Id : %d\n", bp->tx_ring_id); + dbg_prn(" Rx ring Id : %d\n", bp->rx_ring_id); + dbg_prn(" "); + pause_drv(); +} + +void dbg_num_rings(struct bnxt *bp) +{ + dbg_prn(" num_cmpl_rings : %d\n", bp->num_cmpl_rings); + dbg_prn(" num_tx_rings : %d\n", bp->num_tx_rings); + dbg_prn(" num_rx_rings : %d\n", bp->num_rx_rings); + dbg_prn(" num_ring_grps : %d\n", bp->num_hw_ring_grps); + dbg_prn(" num_stat_ctxs : %d\n", bp->num_stat_ctxs); +} + +void dbg_flags(const char *func, u32 flags) +{ + dbg_prn("- %s()\n", func); + dbg_prn(" bp->flags : 0x%04x\n", flags); +} + +void dbg_bnxt_pause(void) +{ + dbg_prn(" "); + pause_drv(); +} +#else +#define dbg_fw_ver(resp, tmo) +#define dbg_func_resource_qcaps(bp) +#define dbg_func_qcaps(bp) +#define dbg_func_qcfg(bp) +#define prn_set_speed(speed) +#define dbg_chip_info(bp) +#define dbg_num_rings(bp) +#define dbg_flags(func, flags) +#define dbg_bnxt_pause() +#endif + +#if defined(DEBUG_HWRM_CMDS) || defined(DEBUG_FAIL) +void dump_hwrm_req(struct bnxt *bp, const char *func, u32 len, u32 tmo) +{ + dbg_prn("- %s(0x%04x) cmd_len %d cmd_tmo %d", + func, (u16)((struct input *)bp->hwrm_addr_req)->req_type, + len, tmo); +#if defined(DEBUG_HWRM_DUMP) + dump_mem((u8 *)bp->hwrm_addr_req, len, DISP_U8); +#else + dbg_prn("\n"); +#endif +} + +void debug_resp(struct bnxt *bp, const char *func, u32 resp_len, u16 err) +{ + dbg_prn("- %s(0x%04x) - ", + func, (u16)((struct input *)bp->hwrm_addr_req)->req_type); + if (err == STATUS_SUCCESS) + dbg_prn("Done"); + else if (err != STATUS_TIMEOUT) + dbg_prn("Fail err 0x%04x", err); + else + dbg_prn("timedout"); +#if defined(DEBUG_HWRM_DUMP) + if (err != STATUS_TIMEOUT) { + dump_mem((u8 *)bp->hwrm_addr_resp, resp_len, DISP_U8); + sleep(1); + } else + dbg_prn("\n"); +#else + resp_len = resp_len; + dbg_prn("\n"); +#endif +} + +void dbg_hw_cmd(struct bnxt *bp, + const char 
*func, u16 cmd_len, + u16 resp_len, u32 cmd_tmo, u16 err) +{ +#if !defined(DEBUG_HWRM_CMDS) + if (err) +#endif + { + dump_hwrm_req(bp, func, cmd_len, cmd_tmo); + debug_resp(bp, func, resp_len, err); + } +} +#else +#define dbg_hw_cmd(bp, func, cmd_len, resp_len, cmd_tmo, err) (func = func) +#endif + +#if defined(DEBUG_HWRM_CMDS) +void dbg_short_cmd(u8 *req, const char *func, u32 len) +{ + struct hwrm_short_input *sreq; + + sreq = (struct hwrm_short_input *)req; + dbg_prn("- %s(0x%04x) short_cmd_len %d", + func, + sreq->req_type, + (int)len); +#if defined(DEBUG_HWRM_DUMP) + dump_mem((u8 *)sreq, len, DISP_U8); +#else + dbg_prn("\n"); +#endif +} +#else +#define dbg_short_cmd(sreq, func, len) +#endif + +#if defined(DEBUG_RX) +void dump_rx_bd(struct rx_pkt_cmpl *rx_cmp, + struct rx_pkt_cmpl_hi *rx_cmp_hi, + u32 desc_idx) +{ + dbg_prn(" RX desc_idx %d PktLen %d\n", desc_idx, rx_cmp->len); + dbg_prn("- rx_cmp %lx", virt_to_bus(rx_cmp)); +#if defined(DEBUG_RX_DUMP) + dump_mem((u8 *)rx_cmp, (u32)sizeof(struct rx_pkt_cmpl), DISP_U8); +#else + dbg_prn("\n"); +#endif + dbg_prn("- rx_cmp_hi %lx", virt_to_bus(rx_cmp_hi)); +#if defined(DEBUG_RX_DUMP) + dump_mem((u8 *)rx_cmp_hi, (u32)sizeof(struct rx_pkt_cmpl_hi), DISP_U8); +#else + dbg_prn("\n"); +#endif +} + +void dbg_rx_vlan(struct bnxt *bp, u32 meta, u16 f2, u16 rx_vid) +{ + dbg_prn(" Rx VLAN metadata %x flags2 %x\n", meta, f2); + dbg_prn(" Rx VLAN MBA %d TX %d RX %d\n", + bp->vlan_id, bp->vlan_tx, rx_vid); +} + +void dbg_alloc_rx_iob(struct io_buffer *iob, u16 id, u16 cid) +{ + dbg_prn(" Rx alloc_iob (%d) %p bd_virt (%d)\n", + id, iob->data, cid); +} + +void dbg_rx_cid(u16 idx, u16 cid) +{ + dbg_prn("- RX old cid %d new cid %d\n", idx, cid); +} + +void dbg_alloc_rx_iob_fail(u16 iob_idx, u16 cons_id) +{ + dbg_prn(" Rx alloc_iob (%d) ", iob_idx); + dbg_prn("failed for cons_id %d\n", cons_id); +} + +void dbg_rxp(u8 *iob, u16 rx_len, u8 drop) +{ + dbg_prn("- RX iob %lx Len %d ", virt_to_bus(iob), rx_len); + if (drop == 1) + dbg_prn("drop ErrPkt "); + else if (drop == 2) + dbg_prn("drop LoopBack "); + else if (drop == 3) + dbg_prn("drop VLAN"); +#if defined(DEBUG_RX_DUMP) + dump_mem(iob, (u32)rx_len, DISP_U8); +#else + dbg_prn("\n"); +#endif +} + +void dbg_rx_stat(struct bnxt *bp) +{ + dbg_prn("- RX Stat Total %d Good %d Drop err %d LB %d VLAN %d\n", + bp->rx.cnt, bp->rx.good, + bp->rx.drop_err, bp->rx.drop_lb, bp->rx.drop_vlan); +} +#else +#define dump_rx_bd(rx_cmp, rx_cmp_hi, desc_idx) +#define dbg_rx_vlan(bp, metadata, flags2, rx_vid) +#define dbg_alloc_rx_iob(iob, id, cid) +#define dbg_rx_cid(idx, cid) +#define dbg_alloc_rx_iob_fail(iob_idx, cons_id) +#define dbg_rxp(iob, rx_len, drop) +#define dbg_rx_stat(bp) +#endif + +#if defined(DEBUG_CQ) +static void dump_cq(struct cmpl_base *cmp, u16 cid) +{ + dbg_prn("- CQ Type "); + switch (cmp->type & CMPL_BASE_TYPE_MASK) { + case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: + dbg_prn("(ae)"); + break; + case CMPL_BASE_TYPE_STAT_EJECT: + dbg_prn("(se)"); + break; + case CMPL_BASE_TYPE_TX_L2: + dbg_prn("(tx)"); + break; + case CMPL_BASE_TYPE_RX_L2: + dbg_prn("(rx)"); + break; + default: + dbg_prn("%04x", (u16)(cmp->type & CMPL_BASE_TYPE_MASK)); + break; + } + dbg_prn(" cid %d", cid); +#if defined(DEBUG_CQ_DUMP) + dump_mem((u8 *)cmp, (u32)sizeof(struct cmpl_base), DISP_U8); +#else + dbg_prn("\n"); +#endif +} + +static void dump_nq(struct nq_base *nqp, u16 cid) +{ + dbg_prn("- NQ Type %lx cid %d", (nqp->type & NQ_CN_TYPE_MASK), cid); +#if defined(DEBUG_CQ_DUMP) + dump_mem((u8 *)nqp, (u32)sizeof(struct nq_base), DISP_U8); 
+#else + dbg_prn("\n"); +#endif +} +#else +#define dump_cq(cq, id) +#define dump_nq(nq, id) +#endif + +#if defined(DEBUG_TX) +void dbg_tx_avail(struct bnxt *bp, u32 avail, u16 use) +{ + dbg_prn("- Tx BD %d Avail %d Use %d pid %d cid %d\n", + bp->tx.ring_cnt, + avail, use, + bp->tx.prod_id, + bp->tx.cons_id); +} + +void dbg_tx_vlan(struct bnxt *bp, char *src, u16 plen, u16 len) +{ + dbg_prn("- Tx VLAN PKT %d MBA %d", bp->vlan_tx, bp->vlan_id); + dbg_prn(" PKT %d", + BYTE_SWAP_S(*(u16 *)(&src[MAC_HDR_SIZE + 2]))); + dbg_prn(" Pro %x", + BYTE_SWAP_S(*(u16 *)(&src[MAC_HDR_SIZE]))); + dbg_prn(" old len %d new len %d\n", plen, len); +} + +void dbg_tx_pad(u16 plen, u16 len) +{ + if (len != plen) + dbg_prn("- Tx padded(0) old len %d new len %d\n", plen, len); +} + +void dump_tx_stat(struct bnxt *bp) +{ + dbg_prn(" TX stats cnt %d req_cnt %d", bp->tx.cnt, bp->tx.cnt_req); + dbg_prn(" prod_id %d cons_id %d\n", bp->tx.prod_id, bp->tx.cons_id); +} + +void dump_tx_pkt(u8 *pkt, u16 len, u16 idx) +{ + dbg_prn(" TX(%d) Addr %lx Size %d", idx, virt_to_bus(pkt), len); +#if defined(DEBUG_TX_DUMP) + dump_mem(pkt, (u32)len, DISP_U8); +#else + dbg_prn("\n"); +#endif +} + +void dump_tx_bd(struct tx_bd_short *tx_bd, u16 len, int idx) +{ + dbg_prn(" Tx(%d) BD Addr %lx Size %d", idx, virt_to_bus(tx_bd), len); +#if defined(DEBUG_TX_DUMP) + dump_mem((u8 *)tx_bd, (u32)len, DISP_U8); +#else + dbg_prn("\n"); +#endif +} + +void dbg_tx_done(u8 *pkt, u16 len, u16 idx) +{ + dbg_prn(" Tx(%d) Done pkt %lx Size %d\n", idx, virt_to_bus(pkt), len); +} +#else +#define dbg_tx_avail(bp, a, u) +#define dbg_tx_vlan(bp, src, plen, len) +#define dbg_tx_pad(plen, len) +#define dump_tx_stat(bp) +#define dump_tx_pkt(pkt, len, idx) +#define dump_tx_bd(prod_bd, len, idx) +#define dbg_tx_done(pkt, len, idx) +#endif + +#if defined(DEBUG_LINK) +static void dump_evt(u8 *cmp, u32 type, u16 cid, u8 ring) +{ + u32 size; + u8 c; + + if (ring) { + c = 'N'; + size = sizeof(struct nq_base); + } else { + c = 'C'; + size = sizeof(struct cmpl_base); + } + switch (type) { + case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: + break; + default: + return; + } + dbg_prn("- %cQ Type (ae) cid %d", c, cid); + dump_mem(cmp, size, DISP_U8); +} + +void dbg_link_info(struct bnxt *bp) +{ + dbg_prn(" Current Speed : "); + switch (bp->current_link_speed) { + case PORT_PHY_QCFG_RESP_LINK_SPEED_200GB: + dbg_prn("200 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_100GB: + dbg_prn("100 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_50GB: + dbg_prn("50 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_40GB: + dbg_prn("40 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_25GB: + dbg_prn("25 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_20GB: + dbg_prn("20 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_10GB: + dbg_prn("10 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB: + dbg_prn("2.5 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_2GB: + dbg_prn("2 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_1GB: + dbg_prn("1 %s", str_gbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_100MB: + dbg_prn("100 %s", str_mbps); + break; + case PORT_PHY_QCFG_RESP_LINK_SPEED_10MB: + dbg_prn("10 %s", str_mbps); + break; + default: + dbg_prn("%x", bp->current_link_speed); + } + dbg_prn("\n"); + dbg_prn(" media_detect : %x\n", bp->media_detect); +} + +void dbg_link_status(struct bnxt *bp) +{ + dbg_prn(" Port(%d) : Link", bp->port_idx); + if (bp->link_status == 
STATUS_LINK_ACTIVE) + dbg_prn("Up"); + else + dbg_prn("Down"); + dbg_prn("\n"); +} + +void dbg_link_state(struct bnxt *bp, u32 tmo) +{ + dbg_link_status(bp); + dbg_link_info(bp); + dbg_prn(" Link wait time : %d ms", tmo); + pause_drv(); +} +#else +#define dump_evt(cq, ty, id, ring) +#define dbg_link_status(bp) +#define dbg_link_state(bp, tmo) +#endif diff --git a/src/drivers/net/bnxt/bnxt_hsi.h b/src/drivers/net/bnxt/bnxt_hsi.h new file mode 100644 index 000000000..086acb8b3 --- /dev/null +++ b/src/drivers/net/bnxt/bnxt_hsi.h @@ -0,0 +1,10337 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2019 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * DO NOT MODIFY!!! This file is automatically generated. + */ + +#ifndef _BNXT_HSI_H_ +#define _BNXT_HSI_H_ + +/* hwrm_cmd_hdr (size:128b/16B) */ +struct hwrm_cmd_hdr { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_resp_hdr (size:64b/8B) */ +struct hwrm_resp_hdr { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; +}; + +#define CMD_DISCR_TLV_ENCAP 0x8000UL +#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP + +#define TLV_TYPE_HWRM_REQUEST 0x1UL +#define TLV_TYPE_HWRM_RESPONSE 0x2UL +#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL +#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL +#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL +#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL +#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL +#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL +#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL +#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL +#define TLV_TYPE_ENGINE_CKV_ALGORITHMS 0x8006UL +#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY 0x8007UL +#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL +#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE + +/* tlv (size:64b/8B) */ +struct tlv { + __le16 cmd_discr; + u8 reserved_8b; + u8 flags; + #define TLV_FLAGS_MORE 0x1UL + #define TLV_FLAGS_MORE_LAST 0x0UL + #define TLV_FLAGS_MORE_NOT_LAST 0x1UL + #define TLV_FLAGS_REQUIRED 0x2UL + #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1) + #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1) + #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES + __le16 tlv_type; + __le16 length; +}; + +/* input (size:128b/16B) */ +struct input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* output (size:64b/8B) */ +struct output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; +}; + +/* hwrm_short_input (size:128b/16B) */ +struct hwrm_short_input { + __le16 req_type; + __le16 signature; + #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL + #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD + __le16 unused_0; + __le16 size; + __le64 req_addr; +}; + +/* cmd_nums (size:64b/8B) */ +struct cmd_nums { + __le16 req_type; + #define HWRM_VER_GET 0x0UL + #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL + #define HWRM_FUNC_BUF_UNRGTR 0xeUL + #define HWRM_FUNC_VF_CFG 0xfUL + #define HWRM_RESERVED1 0x10UL + #define HWRM_FUNC_RESET 0x11UL + #define HWRM_FUNC_GETFID 0x12UL + #define HWRM_FUNC_VF_ALLOC 0x13UL + #define HWRM_FUNC_VF_FREE 0x14UL + #define HWRM_FUNC_QCAPS 0x15UL + #define HWRM_FUNC_QCFG 0x16UL + #define HWRM_FUNC_CFG 0x17UL + #define HWRM_FUNC_QSTATS 0x18UL + #define HWRM_FUNC_CLR_STATS 
0x19UL + #define HWRM_FUNC_DRV_UNRGTR 0x1aUL + #define HWRM_FUNC_VF_RESC_FREE 0x1bUL + #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL + #define HWRM_FUNC_DRV_RGTR 0x1dUL + #define HWRM_FUNC_DRV_QVER 0x1eUL + #define HWRM_FUNC_BUF_RGTR 0x1fUL + #define HWRM_PORT_PHY_CFG 0x20UL + #define HWRM_PORT_MAC_CFG 0x21UL + #define HWRM_PORT_TS_QUERY 0x22UL + #define HWRM_PORT_QSTATS 0x23UL + #define HWRM_PORT_LPBK_QSTATS 0x24UL + #define HWRM_PORT_CLR_STATS 0x25UL + #define HWRM_PORT_LPBK_CLR_STATS 0x26UL + #define HWRM_PORT_PHY_QCFG 0x27UL + #define HWRM_PORT_MAC_QCFG 0x28UL + #define HWRM_PORT_MAC_PTP_QCFG 0x29UL + #define HWRM_PORT_PHY_QCAPS 0x2aUL + #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL + #define HWRM_PORT_PHY_I2C_READ 0x2cUL + #define HWRM_PORT_LED_CFG 0x2dUL + #define HWRM_PORT_LED_QCFG 0x2eUL + #define HWRM_PORT_LED_QCAPS 0x2fUL + #define HWRM_QUEUE_QPORTCFG 0x30UL + #define HWRM_QUEUE_QCFG 0x31UL + #define HWRM_QUEUE_CFG 0x32UL + #define HWRM_FUNC_VLAN_CFG 0x33UL + #define HWRM_FUNC_VLAN_QCFG 0x34UL + #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL + #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL + #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL + #define HWRM_QUEUE_PRI2COS_CFG 0x38UL + #define HWRM_QUEUE_COS2BW_QCFG 0x39UL + #define HWRM_QUEUE_COS2BW_CFG 0x3aUL + #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL + #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL + #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL + #define HWRM_VNIC_ALLOC 0x40UL + #define HWRM_VNIC_FREE 0x41UL + #define HWRM_VNIC_CFG 0x42UL + #define HWRM_VNIC_QCFG 0x43UL + #define HWRM_VNIC_TPA_CFG 0x44UL + #define HWRM_VNIC_TPA_QCFG 0x45UL + #define HWRM_VNIC_RSS_CFG 0x46UL + #define HWRM_VNIC_RSS_QCFG 0x47UL + #define HWRM_VNIC_PLCMODES_CFG 0x48UL + #define HWRM_VNIC_PLCMODES_QCFG 0x49UL + #define HWRM_VNIC_QCAPS 0x4aUL + #define HWRM_RING_ALLOC 0x50UL + #define HWRM_RING_FREE 0x51UL + #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL + #define HWRM_RING_AGGINT_QCAPS 0x54UL + #define HWRM_RING_RESET 0x5eUL + #define HWRM_RING_GRP_ALLOC 0x60UL + #define HWRM_RING_GRP_FREE 0x61UL + #define HWRM_RESERVED5 0x64UL + #define HWRM_RESERVED6 0x65UL + #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL + #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL + #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL + #define HWRM_CFA_L2_FILTER_FREE 0x91UL + #define HWRM_CFA_L2_FILTER_CFG 0x92UL + #define HWRM_CFA_L2_SET_RX_MASK 0x93UL + #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL + #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL + #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL + #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL + #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL + #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL + #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL + #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL + #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL + #define HWRM_CFA_EM_FLOW_FREE 0x9dUL + #define HWRM_CFA_EM_FLOW_CFG 0x9eUL + #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL + #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL + #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL + #define HWRM_STAT_CTX_ENG_QUERY 0xafUL + #define HWRM_STAT_CTX_ALLOC 0xb0UL + #define HWRM_STAT_CTX_FREE 0xb1UL + #define HWRM_STAT_CTX_QUERY 0xb2UL + #define HWRM_STAT_CTX_CLR_STATS 0xb3UL + #define HWRM_PORT_QSTATS_EXT 0xb4UL + #define HWRM_FW_RESET 0xc0UL + #define HWRM_FW_QSTATUS 0xc1UL + #define HWRM_FW_HEALTH_CHECK 0xc2UL + #define HWRM_FW_SYNC 0xc3UL + #define HWRM_FW_SET_TIME 0xc8UL + #define HWRM_FW_GET_TIME 0xc9UL + #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL + #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL + #define 
HWRM_FW_IPC_MAILBOX 0xccUL + #define HWRM_EXEC_FWD_RESP 0xd0UL + #define HWRM_REJECT_FWD_RESP 0xd1UL + #define HWRM_FWD_RESP 0xd2UL + #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL + #define HWRM_OEM_CMD 0xd4UL + #define HWRM_TEMP_MONITOR_QUERY 0xe0UL + #define HWRM_WOL_FILTER_ALLOC 0xf0UL + #define HWRM_WOL_FILTER_FREE 0xf1UL + #define HWRM_WOL_FILTER_QCFG 0xf2UL + #define HWRM_WOL_REASON_QCFG 0xf3UL + #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL + #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL + #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL + #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL + #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL + #define HWRM_CFA_VFR_ALLOC 0xfdUL + #define HWRM_CFA_VFR_FREE 0xfeUL + #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL + #define HWRM_CFA_VF_PAIR_FREE 0x101UL + #define HWRM_CFA_VF_PAIR_INFO 0x102UL + #define HWRM_CFA_FLOW_ALLOC 0x103UL + #define HWRM_CFA_FLOW_FREE 0x104UL + #define HWRM_CFA_FLOW_FLUSH 0x105UL + #define HWRM_CFA_FLOW_STATS 0x106UL + #define HWRM_CFA_FLOW_INFO 0x107UL + #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL + #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL + #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL + #define HWRM_CFA_PAIR_ALLOC 0x10dUL + #define HWRM_CFA_PAIR_FREE 0x10eUL + #define HWRM_CFA_PAIR_INFO 0x10fUL + #define HWRM_FW_IPC_MSG 0x110UL + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL + #define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL + #define HWRM_CFA_FLOW_AGING_CFG 0x114UL + #define HWRM_CFA_FLOW_AGING_QCFG 0x115UL + #define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL + #define HWRM_ENGINE_CKV_HELLO 0x12dUL + #define HWRM_ENGINE_CKV_STATUS 0x12eUL + #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL + #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL + #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL + #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL + #define HWRM_ENGINE_CKV_FLUSH 0x133UL + #define HWRM_ENGINE_CKV_RNG_GET 0x134UL + #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL + #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL + #define HWRM_ENGINE_QG_QUERY 0x13dUL + #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL + #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL + #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL + #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL + #define HWRM_ENGINE_QG_METER_QUERY 0x142UL + #define HWRM_ENGINE_QG_METER_BIND 0x143UL + #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL + #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL + #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL + #define HWRM_ENGINE_SG_QUERY 0x147UL + #define HWRM_ENGINE_SG_METER_QUERY 0x148UL + #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL + #define HWRM_ENGINE_SG_QG_BIND 0x14aUL + #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL + #define HWRM_ENGINE_CONFIG_QUERY 0x154UL + #define HWRM_ENGINE_STATS_CONFIG 0x155UL + #define HWRM_ENGINE_STATS_CLEAR 0x156UL + #define HWRM_ENGINE_STATS_QUERY 0x157UL + #define HWRM_ENGINE_RQ_ALLOC 0x15eUL + #define HWRM_ENGINE_RQ_FREE 0x15fUL + #define HWRM_ENGINE_CQ_ALLOC 0x160UL + #define HWRM_ENGINE_CQ_FREE 0x161UL + #define HWRM_ENGINE_NQ_ALLOC 0x162UL + #define HWRM_ENGINE_NQ_FREE 0x163UL + #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL + #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL + #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL + #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL + #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL + #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL + #define HWRM_FUNC_VF_BW_CFG 0x195UL + #define HWRM_FUNC_VF_BW_QCFG 
0x196UL + #define HWRM_SELFTEST_QLIST 0x200UL + #define HWRM_SELFTEST_EXEC 0x201UL + #define HWRM_SELFTEST_IRQ 0x202UL + #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL + #define HWRM_PCIE_QSTATS 0x204UL + #define HWRM_DBG_READ_DIRECT 0xff10UL + #define HWRM_DBG_READ_INDIRECT 0xff11UL + #define HWRM_DBG_WRITE_DIRECT 0xff12UL + #define HWRM_DBG_WRITE_INDIRECT 0xff13UL + #define HWRM_DBG_DUMP 0xff14UL + #define HWRM_DBG_ERASE_NVM 0xff15UL + #define HWRM_DBG_CFG 0xff16UL + #define HWRM_DBG_COREDUMP_LIST 0xff17UL + #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL + #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL + #define HWRM_DBG_FW_CLI 0xff1aUL + #define HWRM_DBG_I2C_CMD 0xff1bUL + #define HWRM_DBG_RING_INFO_GET 0xff1cUL + #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL + #define HWRM_NVM_VALIDATE_OPTION 0xffefUL + #define HWRM_NVM_FLUSH 0xfff0UL + #define HWRM_NVM_GET_VARIABLE 0xfff1UL + #define HWRM_NVM_SET_VARIABLE 0xfff2UL + #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL + #define HWRM_NVM_MODIFY 0xfff4UL + #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL + #define HWRM_NVM_GET_DEV_INFO 0xfff6UL + #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL + #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL + #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL + #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL + #define HWRM_NVM_GET_DIR_INFO 0xfffbUL + #define HWRM_NVM_RAW_DUMP 0xfffcUL + #define HWRM_NVM_READ 0xfffdUL + #define HWRM_NVM_WRITE 0xfffeUL + #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL + #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK + __le16 unused_0[3]; +}; + +/* ret_codes (size:64b/8B) */ +struct ret_codes { + __le16 error_code; + #define HWRM_ERR_CODE_SUCCESS 0x0UL + #define HWRM_ERR_CODE_FAIL 0x1UL + #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL + #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL + #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL + #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL + #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL + #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL + #define HWRM_ERR_CODE_NO_BUFFER 0x8UL + #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL + #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL + #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL + #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL + #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL + #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL + #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL + #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED + __le16 unused_0[3]; +}; + +/* hwrm_err_output (size:128b/16B) */ +struct hwrm_err_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 opaque_0; + __le16 opaque_1; + u8 cmd_err; + u8 valid; +}; + +#define HWRM_NA_SIGNATURE ((__le32)(-1)) +#define HWRM_MAX_REQ_LEN 128 +#define HWRM_MAX_RESP_LEN 280 +#define HW_HASH_INDEX_SIZE 0x80 +#define HW_HASH_KEY_SIZE 40 +#define HWRM_RESP_VALID_KEY 1 +#define HWRM_VERSION_MAJOR 1 +#define HWRM_VERSION_MINOR 10 +#define HWRM_VERSION_UPDATE 0 +#define HWRM_VERSION_RSVD 18 +#define HWRM_VERSION_STR "1.10.0.18" + +/* hwrm_ver_get_input (size:192b/24B) */ +struct hwrm_ver_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 hwrm_intf_maj; + u8 hwrm_intf_min; + u8 hwrm_intf_upd; + u8 unused_0[5]; +}; + +/* hwrm_ver_get_output (size:1408b/176B) */ +struct hwrm_ver_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 hwrm_intf_maj_8b; + u8 hwrm_intf_min_8b; + u8 hwrm_intf_upd_8b; + u8 hwrm_intf_rsvd_8b; + u8 hwrm_fw_maj_8b; + u8 hwrm_fw_min_8b; + u8 hwrm_fw_bld_8b; + u8 
hwrm_fw_rsvd_8b; + u8 mgmt_fw_maj_8b; + u8 mgmt_fw_min_8b; + u8 mgmt_fw_bld_8b; + u8 mgmt_fw_rsvd_8b; + u8 netctrl_fw_maj_8b; + u8 netctrl_fw_min_8b; + u8 netctrl_fw_bld_8b; + u8 netctrl_fw_rsvd_8b; + __le32 dev_caps_cfg; + #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL + #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL + #define VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED 0x10UL + #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED 0x20UL + #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL + #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL + #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL + #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL + u8 roce_fw_maj_8b; + u8 roce_fw_min_8b; + u8 roce_fw_bld_8b; + u8 roce_fw_rsvd_8b; + char hwrm_fw_name[16]; + char mgmt_fw_name[16]; + char netctrl_fw_name[16]; + u8 reserved2[16]; + char roce_fw_name[16]; + __le16 chip_num; + u8 chip_rev; + u8 chip_metal; + u8 chip_bond_id; + u8 chip_platform_type; + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM + __le16 max_req_win_len; + __le16 max_resp_len; + __le16 def_req_timeout; + u8 flags; + #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL + #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL + u8 unused_0[2]; + u8 always_1; + __le16 hwrm_intf_major; + __le16 hwrm_intf_minor; + __le16 hwrm_intf_build; + __le16 hwrm_intf_patch; + __le16 hwrm_fw_major; + __le16 hwrm_fw_minor; + __le16 hwrm_fw_build; + __le16 hwrm_fw_patch; + __le16 mgmt_fw_major; + __le16 mgmt_fw_minor; + __le16 mgmt_fw_build; + __le16 mgmt_fw_patch; + __le16 netctrl_fw_major; + __le16 netctrl_fw_minor; + __le16 netctrl_fw_build; + __le16 netctrl_fw_patch; + __le16 roce_fw_major; + __le16 roce_fw_minor; + __le16 roce_fw_build; + __le16 roce_fw_patch; + __le16 max_ext_req_len; + u8 unused_1[5]; + u8 valid; +}; + +/* eject_cmpl (size:128b/16B) */ +struct eject_cmpl { + __le16 type; + #define EJECT_CMPL_TYPE_MASK 0x3fUL + #define EJECT_CMPL_TYPE_SFT 0 + #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL + #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT + #define EJECT_CMPL_FLAGS_MASK 0xffc0UL + #define EJECT_CMPL_FLAGS_SFT 6 + #define EJECT_CMPL_FLAGS_ERROR 0x40UL + __le16 len; + __le32 opaque; + __le16 v; + #define EJECT_CMPL_V 0x1UL + #define EJECT_CMPL_ERRORS_MASK 0xfffeUL + #define EJECT_CMPL_ERRORS_SFT 1 + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1 + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH + __le16 reserved16; + __le32 unused_2; +}; + +/* hwrm_cmpl (size:128b/16B) */ +struct hwrm_cmpl { + __le16 type; + #define CMPL_TYPE_MASK 0x3fUL + #define CMPL_TYPE_SFT 0 + #define CMPL_TYPE_HWRM_DONE 0x20UL + #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE + __le16 sequence_id; + __le32 unused_1; + __le32 v; + #define CMPL_V 0x1UL + __le32 unused_3; +}; + +/* hwrm_fwd_req_cmpl 
(size:128b/16B) */ +struct hwrm_fwd_req_cmpl { + __le16 req_len_type; + #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL + #define FWD_REQ_CMPL_TYPE_SFT 0 + #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL + #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ + #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL + #define FWD_REQ_CMPL_REQ_LEN_SFT 6 + __le16 source_id; + __le32 unused0; + __le32 req_buf_addr_v[2]; + #define FWD_REQ_CMPL_V 0x1UL + #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL + #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1 +}; + +/* hwrm_fwd_resp_cmpl (size:128b/16B) */ +struct hwrm_fwd_resp_cmpl { + __le16 type; + #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL + #define FWD_RESP_CMPL_TYPE_SFT 0 + #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL + #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP + __le16 source_id; + __le16 resp_len; + __le16 unused_1; + __le32 resp_buf_addr_v[2]; + #define FWD_RESP_CMPL_V 0x1UL + #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL + #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1 +}; + +/* hwrm_async_event_cmpl (size:128b/16B) */ +struct hwrm_async_event_cmpl { + __le16 type; + #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL + #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL + #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL + #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL + #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_V 0x1UL + #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_status_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 
event_id; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1 + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4 + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20 +}; + +/* hwrm_async_event_cmpl_link_mtu_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_mtu_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0 +}; + +/* hwrm_async_event_cmpl_link_speed_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1 + #define 
ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16 +}; + +/* hwrm_async_event_cmpl_dcb_config_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_dcb_config_change { + __le16 type; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE + __le32 event_data2; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS 0x1UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC 0x2UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP 0x4UL + u8 opaque_v; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT 16 + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE (0xffUL << 16) + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK 0xff000000UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT 24 + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE (0xffUL << 24) + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE +}; + +/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */ +struct 
hwrm_async_event_cmpl_port_conn_not_allowed { + __le16 type; + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16 + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN +}; + +/* hwrm_async_event_cmpl_link_speed_cfg_not_allowed (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_cfg_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define 
ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL +}; + +/* hwrm_async_event_cmpl_port_phy_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_port_phy_cfg_change { + __le16 type; + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_FEC_CFG_CHANGE 0x10000UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_EEE_CFG_CHANGE 0x20000UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PAUSE_CFG_CHANGE 0x40000UL +}; + +/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */ +struct hwrm_async_event_cmpl_reset_notify { + __le16 type; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0 + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN + 
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8 + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16 +}; + +/* hwrm_async_event_cmpl_func_drvr_unload (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_drvr_unload { + __le16 type; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_LAST ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_func_drvr_load (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_drvr_load { + __le16 type; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_LAST ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_func_flr_proc_cmplt (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_flr_proc_cmplt { + __le16 type; + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_LAST ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_LAST ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT + __le32 
event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V 0x1UL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_drvr_unload { + __le16 type; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16 +}; + +/* hwrm_async_event_cmpl_pf_drvr_load (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_drvr_load { + __le16 type; + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_LAST ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16 +}; + +/* hwrm_async_event_cmpl_vf_flr (size:128b/16B) */ +struct hwrm_async_event_cmpl_vf_flr { + __le16 type; + #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_LAST ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR 0x30UL + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL + #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define 
ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0 + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_SFT 16 +}; + +/* hwrm_async_event_cmpl_vf_mac_addr_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_vf_mac_addr_change { + __le16 type; + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_pf_vf_comm_status_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_vf_comm_status_change { + __le16 type; + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL +}; + +/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_vf_cfg_change { + __le16 type; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL 
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL +}; + +/* hwrm_async_event_cmpl_llfc_pfc_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_llfc_pfc_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_MASK 0xffc0UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_SFT 6 + __le16 event_id; + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE 0x34UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_MASK 0x3UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_SFT 0 + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LLFC 0x1UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC 0x2UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LAST ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_MASK 0x1cUL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_SFT 2 + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_MASK 0x1fffe0UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_SFT 5 +}; + +/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_default_vnic_change { + __le16 type; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK 0xffc0UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT 6 + __le16 event_id; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION 0x35UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK 0x3UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT 0 + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC 0x1UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE 0x2UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE + #define 
ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK 0x3fcUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT 2 + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK 0x3fffc00UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT 10 +}; + +/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */ +struct hwrm_async_event_cmpl_hw_flow_aged { + __le16 type; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0 + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31) + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31) + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX +}; + +/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */ +struct hwrm_async_event_cmpl_hwrm_error { + __le16 type; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR + __le32 event_data2; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0 + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL + u8 opaque_v; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL +}; + +/* hwrm_func_reset_input (size:192b/24B) */ +struct hwrm_func_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL + __le16 vf_id; + u8 func_reset_level; + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 
0x3UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF + u8 unused_0; +}; + +/* hwrm_func_reset_output (size:128b/16B) */ +struct hwrm_func_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_getfid_input (size:192b/24B) */ +struct hwrm_func_getfid_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL + __le16 pci_id; + u8 unused_0[2]; +}; + +/* hwrm_func_getfid_output (size:128b/16B) */ +struct hwrm_func_getfid_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_func_vf_alloc_input (size:192b/24B) */ +struct hwrm_func_vf_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL + __le16 first_vf_id; + __le16 num_vfs; +}; + +/* hwrm_func_vf_alloc_output (size:128b/16B) */ +struct hwrm_func_vf_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 first_vf_id; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_func_vf_free_input (size:192b/24B) */ +struct hwrm_func_vf_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL + __le16 first_vf_id; + __le16 num_vfs; +}; + +/* hwrm_func_vf_free_output (size:128b/16B) */ +struct hwrm_func_vf_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_vf_cfg_input (size:448b/56B) */ +struct hwrm_func_vf_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL + #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL + #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL + #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL + __le16 mtu; + __le16 guest_vlan; + __le16 async_event_cr; + u8 dflt_mac_addr[6]; + __le32 flags; + #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL + #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL + #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL + #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL + #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL + #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL + #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL + #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL + __le16 num_rsscos_ctxs; + __le16 num_cmpl_rings; + __le16 num_tx_rings; + __le16 num_rx_rings; + __le16 num_l2_ctxs; + __le16 num_vnics; + __le16 num_stat_ctxs; + __le16 num_hw_ring_grps; + u8 unused_0[4]; +}; + +/* hwrm_func_vf_cfg_output (size:128b/16B) */ +struct hwrm_func_vf_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 
resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_qcaps_input (size:192b/24B) */ +struct hwrm_func_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_qcaps_output (size:640b/80B) */ +struct hwrm_func_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + __le16 port_id; + __le32 flags; + #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL + #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL + #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL + #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL + #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL + #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL + #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL + #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL + #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL + #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL + #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL + #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL + #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL + #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL + #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL + #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL + #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL + #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL + #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL + #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL + #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL + #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL + u8 mac_address[6]; + __le16 max_rsscos_ctx; + __le16 max_cmpl_rings; + __le16 max_tx_rings; + __le16 max_rx_rings; + __le16 max_l2_ctxs; + __le16 max_vnics; + __le16 first_vf_id; + __le16 max_vfs; + __le16 max_stat_ctx; + __le32 max_encap_records; + __le32 max_decap_records; + __le32 max_tx_em_flows; + __le32 max_tx_wm_flows; + __le32 max_rx_em_flows; + __le32 max_rx_wm_flows; + __le32 max_mcast_filters; + __le32 max_flow_id; + __le32 max_hw_ring_grps; + __le16 max_sp_tx_rings; + u8 unused_0; + u8 valid; +}; + +/* hwrm_func_qcfg_input (size:192b/24B) */ +struct hwrm_func_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_qcfg_output (size:704b/88B) */ +struct hwrm_func_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + __le16 port_id; + __le16 vlan; + __le16 flags; + #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL + #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL + #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL + #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL + #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL + #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL + #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL + u8 mac_address[6]; + __le16 pci_id; + __le16 alloc_rsscos_ctx; + __le16 alloc_cmpl_rings; + __le16 alloc_tx_rings; + __le16 alloc_rx_rings; + __le16 alloc_l2_ctx; + __le16 alloc_vnics; + __le16 mtu; + __le16 mru; + __le16 stat_ctx_id; + u8 port_partition_type; + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL + #define 
FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN + u8 port_pf_cnt; + #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL + #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL + __le16 dflt_vnic_id; + __le16 max_mtu_configured; + __le32 min_bw; + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0 + #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL + #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 max_bw; + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0 + #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL + #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID + u8 evb_mode; + #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL + #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL + #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL + #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA + u8 options; + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0 + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2 + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2) + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2) + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2) + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO + #define 
FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL + #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4 + __le16 alloc_vfs; + __le32 alloc_mcast_filters; + __le32 alloc_hw_ring_grps; + __le16 alloc_sp_tx_rings; + __le16 alloc_stat_ctx; + __le16 alloc_msix; + __le16 registered_vfs; + u8 unused_1[3]; + u8 always_1; + __le32 reset_addr_poll; + u8 unused_2[3]; + u8 valid; +}; + +/* hwrm_func_cfg_input (size:704b/88B) */ +struct hwrm_func_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 num_msix; + __le32 flags; + #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL + #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL + #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL + #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2 + #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL + #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL + #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL + #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL + #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL + #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL + #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL + #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL + #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL + #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL + #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL + #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL + #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL + #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL + __le32 enables; + #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL + #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL + #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL + #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL + #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL + #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL + #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL + #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL + #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL + #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL + #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL + #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL + #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL + #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL + #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL + #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL + #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL + #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL + #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL + #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL + #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL + #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL + #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL + __le16 mtu; + __le16 mru; + __le16 num_rsscos_ctxs; + __le16 num_cmpl_rings; + __le16 num_tx_rings; + __le16 num_rx_rings; + __le16 num_l2_ctxs; + __le16 num_vnics; + __le16 num_stat_ctxs; + __le16 num_hw_ring_grps; + u8 dflt_mac_addr[6]; + __le16 dflt_vlan; + __be32 dflt_ip_addr[4]; + __le32 min_bw; + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0 + #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL + #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define 
FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 max_bw; + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0 + #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL + #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID + __le16 async_event_cr; + u8 vlan_antispoof_mode; + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN + u8 allowed_vlan_pris; + u8 evb_mode; + #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL + #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL + #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL + #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA + u8 options; + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0 + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2 + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2) + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2) + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2) + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO + #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL + #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4 + __le16 num_mcast_filters; +}; + +/* hwrm_func_cfg_output (size:128b/16B) */ +struct hwrm_func_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_qstats_input (size:192b/24B) */ +struct hwrm_func_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_qstats_output (size:1408b/176B) */ +struct hwrm_func_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 
resp_len; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_discard_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 rx_agg_pkts; + __le64 rx_agg_bytes; + __le64 rx_agg_events; + __le64 rx_agg_aborts; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_clr_stats_input (size:192b/24B) */ +struct hwrm_func_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_clr_stats_output (size:128b/16B) */ +struct hwrm_func_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_vf_resc_free_input (size:192b/24B) */ +struct hwrm_func_vf_resc_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + u8 unused_0[6]; +}; + +/* hwrm_func_vf_resc_free_output (size:128b/16B) */ +struct hwrm_func_vf_resc_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_drv_rgtr_input (size:896b/112B) */ +struct hwrm_func_drv_rgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL + #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL + #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL + __le32 enables; + #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL + #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL + #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL + #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL + #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL + __le16 os_type; + #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI + u8 ver_maj_8b; + u8 ver_min_8b; + u8 ver_upd_8b; + u8 unused_0[3]; + __le32 timestamp; + u8 unused_1[4]; + __le32 vf_req_fwd[8]; + __le32 async_event_fwd[8]; + __le16 ver_maj; + __le16 ver_min; + __le16 ver_upd; + __le16 ver_patch; +}; + +/* hwrm_func_drv_rgtr_output (size:128b/16B) */ +struct hwrm_func_drv_rgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_func_drv_unrgtr_input (size:192b/24B) */ +struct hwrm_func_drv_unrgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL 
+ u8 unused_0[4]; +}; + +/* hwrm_func_drv_unrgtr_output (size:128b/16B) */ +struct hwrm_func_drv_unrgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_buf_rgtr_input (size:1024b/128B) */ +struct hwrm_func_buf_rgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL + #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL + __le16 vf_id; + __le16 req_buf_num_pages; + __le16 req_buf_page_size; + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G + __le16 req_buf_len; + __le16 resp_buf_len; + u8 unused_0[2]; + __le64 req_buf_page_addr0; + __le64 req_buf_page_addr1; + __le64 req_buf_page_addr2; + __le64 req_buf_page_addr3; + __le64 req_buf_page_addr4; + __le64 req_buf_page_addr5; + __le64 req_buf_page_addr6; + __le64 req_buf_page_addr7; + __le64 req_buf_page_addr8; + __le64 req_buf_page_addr9; + __le64 error_buf_addr; + __le64 resp_buf_addr; +}; + +/* hwrm_func_buf_rgtr_output (size:128b/16B) */ +struct hwrm_func_buf_rgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_buf_unrgtr_input (size:192b/24B) */ +struct hwrm_func_buf_unrgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_BUF_UNRGTR_REQ_ENABLES_VF_ID 0x1UL + __le16 vf_id; + u8 unused_0[2]; +}; + +/* hwrm_func_buf_unrgtr_output (size:128b/16B) */ +struct hwrm_func_buf_unrgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_drv_qver_input (size:192b/24B) */ +struct hwrm_func_drv_qver_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 reserved; + __le16 fid; + u8 unused_0[2]; +}; + +/* hwrm_func_drv_qver_output (size:256b/32B) */ +struct hwrm_func_drv_qver_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 os_type; + #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL + #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL + #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL + #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI + u8 ver_maj_8b; + u8 ver_min_8b; + u8 ver_upd_8b; + u8 unused_0[3]; + __le16 ver_maj; + __le16 ver_min; + __le16 ver_upd; + __le16 ver_patch; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_func_resource_qcaps_input (size:192b/24B) */ +struct hwrm_func_resource_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 
seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_resource_qcaps_output (size:448b/56B) */ +struct hwrm_func_resource_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 max_vfs; + __le16 max_msix; + __le16 vf_reservation_strategy; + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC + __le16 min_rsscos_ctx; + __le16 max_rsscos_ctx; + __le16 min_cmpl_rings; + __le16 max_cmpl_rings; + __le16 min_tx_rings; + __le16 max_tx_rings; + __le16 min_rx_rings; + __le16 max_rx_rings; + __le16 min_l2_ctxs; + __le16 max_l2_ctxs; + __le16 min_vnics; + __le16 max_vnics; + __le16 min_stat_ctx; + __le16 max_stat_ctx; + __le16 min_hw_ring_grps; + __le16 max_hw_ring_grps; + __le16 max_tx_scheduler_inputs; + __le16 flags; + #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */ +struct hwrm_func_vf_resource_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + __le16 max_msix; + __le16 min_rsscos_ctx; + __le16 max_rsscos_ctx; + __le16 min_cmpl_rings; + __le16 max_cmpl_rings; + __le16 min_tx_rings; + __le16 max_tx_rings; + __le16 min_rx_rings; + __le16 max_rx_rings; + __le16 min_l2_ctxs; + __le16 max_l2_ctxs; + __le16 min_vnics; + __le16 max_vnics; + __le16 min_stat_ctx; + __le16 max_stat_ctx; + __le16 min_hw_ring_grps; + __le16 max_hw_ring_grps; + __le16 flags; + #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL + u8 unused_0[2]; +}; + +/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */ +struct hwrm_func_vf_resource_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 reserved_rsscos_ctx; + __le16 reserved_cmpl_rings; + __le16 reserved_tx_rings; + __le16 reserved_rx_rings; + __le16 reserved_l2_ctxs; + __le16 reserved_vnics; + __le16 reserved_stat_ctx; + __le16 reserved_hw_ring_grps; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */ +struct hwrm_func_backing_store_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_func_backing_store_qcaps_output (size:576b/72B) */ +struct hwrm_func_backing_store_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 qp_max_entries; + __le16 qp_min_qp1_entries; + __le16 qp_max_l2_entries; + __le16 qp_entry_size; + __le16 srq_max_l2_entries; + __le32 srq_max_entries; + __le16 srq_entry_size; + __le16 cq_max_l2_entries; + __le32 cq_max_entries; + __le16 cq_entry_size; + __le16 vnic_max_vnic_entries; + __le16 vnic_max_ring_table_entries; + __le16 vnic_entry_size; + __le32 stat_max_entries; + __le16 stat_entry_size; + __le16 tqm_entry_size; + __le32 tqm_min_entries_per_ring; + __le32 tqm_max_entries_per_ring; + __le32 mrav_max_entries; + __le16 mrav_entry_size; + __le16 tim_entry_size; + __le32 tim_max_entries; + u8 unused_0[2]; + u8 tqm_entries_multiple; + u8 valid; +}; + +/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */ +struct hwrm_func_backing_store_cfg_input { + __le16 req_type; + __le16 
cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL + __le32 enables; + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL + u8 qpc_pg_size_qpc_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G + u8 srq_pg_size_srq_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G + u8 cq_pg_size_cq_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 + #define 
FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G + u8 vnic_pg_size_vnic_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G + u8 stat_pg_size_stat_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G + u8 tqm_sp_pg_size_tqm_sp_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4) + #define 
FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G + u8 tqm_ring0_pg_size_tqm_ring0_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G + u8 tqm_ring1_pg_size_tqm_ring1_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G + u8 tqm_ring2_pg_size_tqm_ring2_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4) + #define 
FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G + u8 tqm_ring3_pg_size_tqm_ring3_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G + u8 tqm_ring4_pg_size_tqm_ring4_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G + u8 tqm_ring5_pg_size_tqm_ring5_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G + u8 tqm_ring6_pg_size_tqm_ring6_lvl; + #define 
FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G + u8 tqm_ring7_pg_size_tqm_ring7_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G + u8 mrav_pg_size_mrav_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G + u8 tim_pg_size_tim_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 
0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G + __le64 qpc_page_dir; + __le64 srq_page_dir; + __le64 cq_page_dir; + __le64 vnic_page_dir; + __le64 stat_page_dir; + __le64 tqm_sp_page_dir; + __le64 tqm_ring0_page_dir; + __le64 tqm_ring1_page_dir; + __le64 tqm_ring2_page_dir; + __le64 tqm_ring3_page_dir; + __le64 tqm_ring4_page_dir; + __le64 tqm_ring5_page_dir; + __le64 tqm_ring6_page_dir; + __le64 tqm_ring7_page_dir; + __le64 mrav_page_dir; + __le64 tim_page_dir; + __le32 qp_num_entries; + __le32 srq_num_entries; + __le32 cq_num_entries; + __le32 stat_num_entries; + __le32 tqm_sp_num_entries; + __le32 tqm_ring0_num_entries; + __le32 tqm_ring1_num_entries; + __le32 tqm_ring2_num_entries; + __le32 tqm_ring3_num_entries; + __le32 tqm_ring4_num_entries; + __le32 tqm_ring5_num_entries; + __le32 tqm_ring6_num_entries; + __le32 tqm_ring7_num_entries; + __le32 mrav_num_entries; + __le32 tim_num_entries; + __le16 qp_num_qp1_entries; + __le16 qp_num_l2_entries; + __le16 qp_entry_size; + __le16 srq_num_l2_entries; + __le16 srq_entry_size; + __le16 cq_num_l2_entries; + __le16 cq_entry_size; + __le16 vnic_num_vnic_entries; + __le16 vnic_num_ring_table_entries; + __le16 vnic_entry_size; + __le16 stat_entry_size; + __le16 tqm_entry_size; + __le16 mrav_entry_size; + __le16 tim_entry_size; +}; + +/* hwrm_func_backing_store_cfg_output (size:128b/16B) */ +struct hwrm_func_backing_store_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_backing_store_qcfg_input (size:128b/16B) */ +struct hwrm_func_backing_store_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_func_backing_store_qcfg_output (size:1920b/240B) */ +struct hwrm_func_backing_store_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define FUNC_BACKING_STORE_QCFG_RESP_FLAGS_PREBOOT_MODE 0x1UL + u8 unused_0[4]; + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_QP 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_SRQ 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_CQ 0x4UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_VNIC 0x8UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_STAT 0x10UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_SP 0x20UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING0 0x40UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING1 0x80UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING2 0x100UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING3 0x200UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING4 0x400UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING5 0x800UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING6 0x1000UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TQM_RING7 
0x2000UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_MRAV 0x4000UL + #define FUNC_BACKING_STORE_QCFG_RESP_UNUSED_0_TIM 0x8000UL + u8 qpc_pg_size_qpc_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_1G + u8 srq_pg_size_srq_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_1G + u8 cq_pg_size_cq_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_1G + u8 vnic_pg_size_vnic_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LVL_1 0x1UL + #define 
FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_1G + u8 stat_pg_size_stat_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_1G + u8 tqm_sp_pg_size_tqm_sp_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_1G + u8 tqm_ring0_pg_size_tqm_ring0_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_MASK 0xf0UL + #define 
FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_1G + u8 tqm_ring1_pg_size_tqm_ring1_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_1G + u8 tqm_ring2_pg_size_tqm_ring2_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_1G + u8 tqm_ring3_pg_size_tqm_ring3_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_SFT 4 
+ #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_1G + u8 tqm_ring4_pg_size_tqm_ring4_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_1G + u8 tqm_ring5_pg_size_tqm_ring5_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_1G + u8 tqm_ring6_pg_size_tqm_ring6_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_SFT 4 + #define 
FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_1G + u8 tqm_ring7_pg_size_tqm_ring7_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_1G + u8 mrav_pg_size_mrav_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_1G + u8 tim_pg_size_tim_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_64K (0x2UL << 4) + #define 
FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_1G + __le64 qpc_page_dir; + __le64 srq_page_dir; + __le64 cq_page_dir; + __le64 vnic_page_dir; + __le64 stat_page_dir; + __le64 tqm_sp_page_dir; + __le64 tqm_ring0_page_dir; + __le64 tqm_ring1_page_dir; + __le64 tqm_ring2_page_dir; + __le64 tqm_ring3_page_dir; + __le64 tqm_ring4_page_dir; + __le64 tqm_ring5_page_dir; + __le64 tqm_ring6_page_dir; + __le64 tqm_ring7_page_dir; + __le64 mrav_page_dir; + __le64 tim_page_dir; + __le16 qp_num_qp1_entries; + __le16 qp_num_l2_entries; + __le32 qp_num_entries; + __le32 srq_num_entries; + __le16 srq_num_l2_entries; + __le16 cq_num_l2_entries; + __le32 cq_num_entries; + __le16 vnic_num_vnic_entries; + __le16 vnic_num_ring_table_entries; + __le32 stat_num_entries; + __le32 tqm_sp_num_entries; + __le32 tqm_ring0_num_entries; + __le32 tqm_ring1_num_entries; + __le32 tqm_ring2_num_entries; + __le32 tqm_ring3_num_entries; + __le32 tqm_ring4_num_entries; + __le32 tqm_ring5_num_entries; + __le32 tqm_ring6_num_entries; + __le32 tqm_ring7_num_entries; + __le32 mrav_num_entries; + __le32 tim_num_entries; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_func_vlan_qcfg_input (size:192b/24B) */ +struct hwrm_func_vlan_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_vlan_qcfg_output (size:320b/40B) */ +struct hwrm_func_vlan_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 unused_0; + __le16 stag_vid; + u8 stag_pcp; + u8 unused_1; + __be16 stag_tpid; + __le16 ctag_vid; + u8 ctag_pcp; + u8 unused_2; + __be16 ctag_tpid; + __le32 rsvd2; + __le32 rsvd3; + u8 unused_3[3]; + u8 valid; +}; + +/* hwrm_func_vlan_cfg_input (size:384b/48B) */ +struct hwrm_func_vlan_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[2]; + __le32 enables; + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL + __le16 stag_vid; + u8 stag_pcp; + u8 unused_1; + __be16 stag_tpid; + __le16 ctag_vid; + u8 ctag_pcp; + u8 unused_2; + __be16 ctag_tpid; + __le32 rsvd1; + __le32 rsvd2; + u8 unused_3[4]; +}; + +/* hwrm_func_vlan_cfg_output (size:128b/16B) */ +struct hwrm_func_vlan_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */ +struct hwrm_func_vf_vnic_ids_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + u8 unused_0[2]; + __le32 max_vnic_id_cnt; + __le64 vnic_id_tbl_addr; +}; + +/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */ +struct hwrm_func_vf_vnic_ids_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 vnic_id_cnt; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_func_vf_bw_cfg_input (size:960b/120B) */ +struct hwrm_func_vf_bw_cfg_input { + __le16 req_type; 
+ __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 num_vfs; + __le16 unused[3]; + __le16 vfn[48]; + #define FUNC_VF_BW_CFG_REQ_VFN_VFID_MASK 0xfffUL + #define FUNC_VF_BW_CFG_REQ_VFN_VFID_SFT 0 + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_MASK 0xf000UL + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_SFT 12 + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_0 (0x0UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_6_66 (0x1UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_13_33 (0x2UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_20 (0x3UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_26_66 (0x4UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_33_33 (0x5UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_40 (0x6UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_46_66 (0x7UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_53_33 (0x8UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_60 (0x9UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_66_66 (0xaUL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_73_33 (0xbUL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_80 (0xcUL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_86_66 (0xdUL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_93_33 (0xeUL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_100 (0xfUL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_LAST FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_100 +}; + +/* hwrm_func_vf_bw_cfg_output (size:128b/16B) */ +struct hwrm_func_vf_bw_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_vf_bw_qcfg_input (size:960b/120B) */ +struct hwrm_func_vf_bw_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 num_vfs; + __le16 unused[3]; + __le16 vfn[48]; + #define FUNC_VF_BW_QCFG_REQ_VFN_VFID_MASK 0xfffUL + #define FUNC_VF_BW_QCFG_REQ_VFN_VFID_SFT 0 +}; + +/* hwrm_func_vf_bw_qcfg_output (size:960b/120B) */ +struct hwrm_func_vf_bw_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 num_vfs; + __le16 unused[3]; + __le16 vfn[48]; + #define FUNC_VF_BW_QCFG_RESP_VFN_VFID_MASK 0xfffUL + #define FUNC_VF_BW_QCFG_RESP_VFN_VFID_SFT 0 + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_MASK 0xf000UL + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_SFT 12 + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_0 (0x0UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_6_66 (0x1UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_13_33 (0x2UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_20 (0x3UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_26_66 (0x4UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_33_33 (0x5UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_40 (0x6UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_46_66 (0x7UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_53_33 (0x8UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_60 (0x9UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_66_66 (0xaUL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_73_33 (0xbUL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_80 (0xcUL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_86_66 (0xdUL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_93_33 (0xeUL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_100 (0xfUL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_LAST FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_100 + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_drv_if_change_input (size:192b/24B) */ +struct 
hwrm_func_drv_if_change_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL + __le32 unused; +}; + +/* hwrm_func_drv_if_change_output (size:128b/16B) */ +struct hwrm_func_drv_if_change_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_port_phy_cfg_input (size:448b/56B) */ +struct hwrm_port_phy_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL + #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL + #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL + #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL + #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL + __le32 enables; + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL + #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL + #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL + #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL + #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL + #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL + #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL + __le16 port_id; + __le16 force_link_speed; + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB + u8 auto_mode; + #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK + u8 auto_duplex; + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST 
PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH + u8 auto_pause; + #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL + u8 unused_0; + __le16 auto_link_speed; + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB + __le16 auto_link_speed_mask; + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_200GB 0x4000UL + u8 wirespeed; + #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL + #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL + #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON + u8 lpbk; + #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL + #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL + #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL + #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL + #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL + u8 force_pause; + #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL + #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL + u8 unused_1; + __le32 preemphasis; + __le16 eee_link_speed_mask; + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL + u8 unused_2[2]; + __le32 tx_lpi_timer; + #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL + #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0 + __le32 unused_3; +}; + +/* hwrm_port_phy_cfg_output (size:128b/16B) */ +struct hwrm_port_phy_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */ +struct hwrm_port_phy_cfg_cmd_err { + u8 code; + #define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL + #define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL + #define 
PORT_PHY_CFG_CMD_ERR_CODE_RETRY 0x2UL + #define PORT_PHY_CFG_CMD_ERR_CODE_LAST PORT_PHY_CFG_CMD_ERR_CODE_RETRY + u8 unused_0[7]; +}; + +/* hwrm_port_phy_qcfg_input (size:192b/24B) */ +struct hwrm_port_phy_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_phy_qcfg_output (size:768b/96B) */ +struct hwrm_port_phy_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 link; + #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL + #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK + u8 unused_0; + __le16 link_speed; + #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB + u8 duplex_cfg; + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL + u8 pause; + #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL + __le16 support_speeds; + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_200GB 0x4000UL + __le16 force_link_speed; + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB + u8 auto_mode; + #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL + 
#define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK + u8 auto_pause; + #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL + __le16 auto_link_speed; + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB + __le16 auto_link_speed_mask; + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_200GB 0x4000UL + u8 wirespeed; + #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL + #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL + #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON + u8 lpbk; + #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL + #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL + #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL + #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL + #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL + u8 force_pause; + #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL + u8 module_status; + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE + __le32 preemphasis; + u8 phy_maj; + u8 phy_min; + u8 phy_bld; + u8 phy_type; + #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL + #define 
PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4 0x1cUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 + u8 media_type; + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE + u8 xcvr_pkg_type; + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL + u8 eee_config_phy_addr; + #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL + #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0 + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5 + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL + u8 parallel_detect; + #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL + __le16 link_partner_adv_speeds; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL + #define 
PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL + u8 link_partner_adv_auto_mode; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK + u8 link_partner_adv_pause; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL + __le16 adv_eee_link_speed_mask; + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL + __le16 link_partner_adv_eee_link_speed_mask; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL + __le32 xcvr_identifier_type_tx_lpi_timer; + #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL + #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0 + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24 + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 + __le16 fec_cfg; + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL + u8 duplex_state; + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL + u8 option_flags; + #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL + char phy_vendor_name[16]; + char phy_vendor_partnumber[16]; + u8 unused_2[7]; + u8 valid; +}; + +/* hwrm_port_mac_cfg_input (size:320b/40B) */ +struct hwrm_port_mac_cfg_input { + __le16 
req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL + #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL + #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL + #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL + #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL + #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL + #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL + #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL + #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL + __le32 enables; + #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL + #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL + #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL + #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL + #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL + #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL + #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL + #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL + __le16 port_id; + u8 ipg; + u8 lpbk; + #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL + #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL + #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL + #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE + u8 vlan_pri2cos_map_pri; + u8 reserved1; + u8 tunnel_pri2cos_map_pri; + u8 dscp2pri_map_pri; + __le16 rx_ts_capture_ptp_msg_type; + __le16 tx_ts_capture_ptp_msg_type; + u8 cos_field_cfg; + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1 + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3 + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5 + u8 unused_0[3]; +}; + +/* hwrm_port_mac_cfg_output (size:128b/16B) */ +struct hwrm_port_mac_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mru; + __le16 mtu; + u8 ipg; + u8 lpbk; + #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL + #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL + #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL + #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE + u8 unused_0; + u8 
valid; +}; + +/* hwrm_port_mac_qcfg_input (size:192b/24B) */ +struct hwrm_port_mac_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_mac_qcfg_output (size:192b/24B) */ +struct hwrm_port_mac_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mru; + __le16 mtu; + u8 ipg; + u8 lpbk; + #define PORT_MAC_QCFG_RESP_LPBK_NONE 0x0UL + #define PORT_MAC_QCFG_RESP_LPBK_LOCAL 0x1UL + #define PORT_MAC_QCFG_RESP_LPBK_REMOTE 0x2UL + #define PORT_MAC_QCFG_RESP_LPBK_LAST PORT_MAC_QCFG_RESP_LPBK_REMOTE + u8 vlan_pri2cos_map_pri; + u8 flags; + #define PORT_MAC_QCFG_RESP_FLAGS_VLAN_PRI2COS_ENABLE 0x1UL + #define PORT_MAC_QCFG_RESP_FLAGS_TUNNEL_PRI2COS_ENABLE 0x2UL + #define PORT_MAC_QCFG_RESP_FLAGS_IP_DSCP2COS_ENABLE 0x4UL + #define PORT_MAC_QCFG_RESP_FLAGS_OOB_WOL_ENABLE 0x8UL + #define PORT_MAC_QCFG_RESP_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL + #define PORT_MAC_QCFG_RESP_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x20UL + u8 tunnel_pri2cos_map_pri; + u8 dscp2pri_map_pri; + __le16 rx_ts_capture_ptp_msg_type; + __le16 tx_ts_capture_ptp_msg_type; + u8 cos_field_cfg; + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_RSVD 0x1UL + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1 + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3 + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_SFT 5 + u8 valid; +}; + +/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */ +struct hwrm_port_mac_ptp_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */ +struct hwrm_port_mac_ptp_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL + u8 unused_0[3]; + __le32 rx_ts_reg_off_lower; + __le32 rx_ts_reg_off_upper; + __le32 rx_ts_reg_off_seq_id; + __le32 rx_ts_reg_off_src_id_0; + __le32 rx_ts_reg_off_src_id_1; + __le32 rx_ts_reg_off_src_id_2; + __le32 rx_ts_reg_off_domain_id; + __le32 rx_ts_reg_off_fifo; + __le32 rx_ts_reg_off_fifo_adv; + __le32 rx_ts_reg_off_granularity; + __le32 tx_ts_reg_off_lower; + __le32 tx_ts_reg_off_upper; + __le32 tx_ts_reg_off_seq_id; + __le32 tx_ts_reg_off_fifo; + __le32 
tx_ts_reg_off_granularity; + u8 unused_1[7]; + u8 valid; +}; + +/* tx_port_stats (size:3264b/408B) */ +struct tx_port_stats { + __le64 tx_64b_frames; + __le64 tx_65b_127b_frames; + __le64 tx_128b_255b_frames; + __le64 tx_256b_511b_frames; + __le64 tx_512b_1023b_frames; + __le64 tx_1024b_1518b_frames; + __le64 tx_good_vlan_frames; + __le64 tx_1519b_2047b_frames; + __le64 tx_2048b_4095b_frames; + __le64 tx_4096b_9216b_frames; + __le64 tx_9217b_16383b_frames; + __le64 tx_good_frames; + __le64 tx_total_frames; + __le64 tx_ucast_frames; + __le64 tx_mcast_frames; + __le64 tx_bcast_frames; + __le64 tx_pause_frames; + __le64 tx_pfc_frames; + __le64 tx_jabber_frames; + __le64 tx_fcs_err_frames; + __le64 tx_control_frames; + __le64 tx_oversz_frames; + __le64 tx_single_dfrl_frames; + __le64 tx_multi_dfrl_frames; + __le64 tx_single_coll_frames; + __le64 tx_multi_coll_frames; + __le64 tx_late_coll_frames; + __le64 tx_excessive_coll_frames; + __le64 tx_frag_frames; + __le64 tx_err; + __le64 tx_tagged_frames; + __le64 tx_dbl_tagged_frames; + __le64 tx_runt_frames; + __le64 tx_fifo_underruns; + __le64 tx_pfc_ena_frames_pri0; + __le64 tx_pfc_ena_frames_pri1; + __le64 tx_pfc_ena_frames_pri2; + __le64 tx_pfc_ena_frames_pri3; + __le64 tx_pfc_ena_frames_pri4; + __le64 tx_pfc_ena_frames_pri5; + __le64 tx_pfc_ena_frames_pri6; + __le64 tx_pfc_ena_frames_pri7; + __le64 tx_eee_lpi_events; + __le64 tx_eee_lpi_duration; + __le64 tx_llfc_logical_msgs; + __le64 tx_hcfc_msgs; + __le64 tx_total_collisions; + __le64 tx_bytes; + __le64 tx_xthol_frames; + __le64 tx_stat_discard; + __le64 tx_stat_error; +}; + +/* rx_port_stats (size:4224b/528B) */ +struct rx_port_stats { + __le64 rx_64b_frames; + __le64 rx_65b_127b_frames; + __le64 rx_128b_255b_frames; + __le64 rx_256b_511b_frames; + __le64 rx_512b_1023b_frames; + __le64 rx_1024b_1518b_frames; + __le64 rx_good_vlan_frames; + __le64 rx_1519b_2047b_frames; + __le64 rx_2048b_4095b_frames; + __le64 rx_4096b_9216b_frames; + __le64 rx_9217b_16383b_frames; + __le64 rx_total_frames; + __le64 rx_ucast_frames; + __le64 rx_mcast_frames; + __le64 rx_bcast_frames; + __le64 rx_fcs_err_frames; + __le64 rx_ctrl_frames; + __le64 rx_pause_frames; + __le64 rx_pfc_frames; + __le64 rx_unsupported_opcode_frames; + __le64 rx_unsupported_da_pausepfc_frames; + __le64 rx_wrong_sa_frames; + __le64 rx_align_err_frames; + __le64 rx_oor_len_frames; + __le64 rx_code_err_frames; + __le64 rx_false_carrier_frames; + __le64 rx_ovrsz_frames; + __le64 rx_jbr_frames; + __le64 rx_mtu_err_frames; + __le64 rx_match_crc_frames; + __le64 rx_promiscuous_frames; + __le64 rx_tagged_frames; + __le64 rx_double_tagged_frames; + __le64 rx_trunc_frames; + __le64 rx_good_frames; + __le64 rx_pfc_xon2xoff_frames_pri0; + __le64 rx_pfc_xon2xoff_frames_pri1; + __le64 rx_pfc_xon2xoff_frames_pri2; + __le64 rx_pfc_xon2xoff_frames_pri3; + __le64 rx_pfc_xon2xoff_frames_pri4; + __le64 rx_pfc_xon2xoff_frames_pri5; + __le64 rx_pfc_xon2xoff_frames_pri6; + __le64 rx_pfc_xon2xoff_frames_pri7; + __le64 rx_pfc_ena_frames_pri0; + __le64 rx_pfc_ena_frames_pri1; + __le64 rx_pfc_ena_frames_pri2; + __le64 rx_pfc_ena_frames_pri3; + __le64 rx_pfc_ena_frames_pri4; + __le64 rx_pfc_ena_frames_pri5; + __le64 rx_pfc_ena_frames_pri6; + __le64 rx_pfc_ena_frames_pri7; + __le64 rx_sch_crc_err_frames; + __le64 rx_undrsz_frames; + __le64 rx_frag_frames; + __le64 rx_eee_lpi_events; + __le64 rx_eee_lpi_duration; + __le64 rx_llfc_physical_msgs; + __le64 rx_llfc_logical_msgs; + __le64 rx_llfc_msgs_with_crc_err; + __le64 rx_hcfc_msgs; + __le64 
rx_hcfc_msgs_with_crc_err; + __le64 rx_bytes; + __le64 rx_runt_bytes; + __le64 rx_runt_frames; + __le64 rx_stat_discard; + __le64 rx_stat_err; +}; + +/* hwrm_port_qstats_input (size:320b/40B) */ +struct hwrm_port_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; + __le64 tx_stat_host_addr; + __le64 rx_stat_host_addr; +}; + +/* hwrm_port_qstats_output (size:128b/16B) */ +struct hwrm_port_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tx_stat_size; + __le16 rx_stat_size; + u8 unused_0[3]; + u8 valid; +}; + +/* tx_port_stats_ext (size:2048b/256B) */ +struct tx_port_stats_ext { + __le64 tx_bytes_cos0; + __le64 tx_bytes_cos1; + __le64 tx_bytes_cos2; + __le64 tx_bytes_cos3; + __le64 tx_bytes_cos4; + __le64 tx_bytes_cos5; + __le64 tx_bytes_cos6; + __le64 tx_bytes_cos7; + __le64 tx_packets_cos0; + __le64 tx_packets_cos1; + __le64 tx_packets_cos2; + __le64 tx_packets_cos3; + __le64 tx_packets_cos4; + __le64 tx_packets_cos5; + __le64 tx_packets_cos6; + __le64 tx_packets_cos7; + __le64 pfc_pri0_tx_duration_us; + __le64 pfc_pri0_tx_transitions; + __le64 pfc_pri1_tx_duration_us; + __le64 pfc_pri1_tx_transitions; + __le64 pfc_pri2_tx_duration_us; + __le64 pfc_pri2_tx_transitions; + __le64 pfc_pri3_tx_duration_us; + __le64 pfc_pri3_tx_transitions; + __le64 pfc_pri4_tx_duration_us; + __le64 pfc_pri4_tx_transitions; + __le64 pfc_pri5_tx_duration_us; + __le64 pfc_pri5_tx_transitions; + __le64 pfc_pri6_tx_duration_us; + __le64 pfc_pri6_tx_transitions; + __le64 pfc_pri7_tx_duration_us; + __le64 pfc_pri7_tx_transitions; +}; + +/* rx_port_stats_ext (size:2368b/296B) */ +struct rx_port_stats_ext { + __le64 link_down_events; + __le64 continuous_pause_events; + __le64 resume_pause_events; + __le64 continuous_roce_pause_events; + __le64 resume_roce_pause_events; + __le64 rx_bytes_cos0; + __le64 rx_bytes_cos1; + __le64 rx_bytes_cos2; + __le64 rx_bytes_cos3; + __le64 rx_bytes_cos4; + __le64 rx_bytes_cos5; + __le64 rx_bytes_cos6; + __le64 rx_bytes_cos7; + __le64 rx_packets_cos0; + __le64 rx_packets_cos1; + __le64 rx_packets_cos2; + __le64 rx_packets_cos3; + __le64 rx_packets_cos4; + __le64 rx_packets_cos5; + __le64 rx_packets_cos6; + __le64 rx_packets_cos7; + __le64 pfc_pri0_rx_duration_us; + __le64 pfc_pri0_rx_transitions; + __le64 pfc_pri1_rx_duration_us; + __le64 pfc_pri1_rx_transitions; + __le64 pfc_pri2_rx_duration_us; + __le64 pfc_pri2_rx_transitions; + __le64 pfc_pri3_rx_duration_us; + __le64 pfc_pri3_rx_transitions; + __le64 pfc_pri4_rx_duration_us; + __le64 pfc_pri4_rx_transitions; + __le64 pfc_pri5_rx_duration_us; + __le64 pfc_pri5_rx_transitions; + __le64 pfc_pri6_rx_duration_us; + __le64 pfc_pri6_rx_transitions; + __le64 pfc_pri7_rx_duration_us; + __le64 pfc_pri7_rx_transitions; +}; + +/* hwrm_port_qstats_ext_input (size:320b/40B) */ +struct hwrm_port_qstats_ext_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 tx_stat_size; + __le16 rx_stat_size; + u8 unused_0[2]; + __le64 tx_stat_host_addr; + __le64 rx_stat_host_addr; +}; + +/* hwrm_port_qstats_ext_output (size:128b/16B) */ +struct hwrm_port_qstats_ext_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tx_stat_size; + __le16 rx_stat_size; + __le16 total_active_cos_queues; + u8 flags; + #define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL + u8 valid; +}; + +/* 
hwrm_port_lpbk_qstats_input (size:128b/16B) */ +struct hwrm_port_lpbk_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_port_lpbk_qstats_output (size:768b/96B) */ +struct hwrm_port_lpbk_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 lpbk_ucast_frames; + __le64 lpbk_mcast_frames; + __le64 lpbk_bcast_frames; + __le64 lpbk_ucast_bytes; + __le64 lpbk_mcast_bytes; + __le64 lpbk_bcast_bytes; + __le64 tx_stat_discard; + __le64 tx_stat_error; + __le64 rx_stat_discard; + __le64 rx_stat_error; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_clr_stats_input (size:192b/24B) */ +struct hwrm_port_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 flags; + #define PORT_CLR_STATS_REQ_FLAGS_ROCE_COUNTERS 0x1UL + u8 unused_0[5]; +}; + +/* hwrm_port_clr_stats_output (size:128b/16B) */ +struct hwrm_port_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */ +struct hwrm_port_lpbk_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */ +struct hwrm_port_lpbk_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_ts_query_input (size:192b/24B) */ +struct hwrm_port_ts_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define PORT_TS_QUERY_REQ_FLAGS_PATH 0x1UL + #define PORT_TS_QUERY_REQ_FLAGS_PATH_TX 0x0UL + #define PORT_TS_QUERY_REQ_FLAGS_PATH_RX 0x1UL + #define PORT_TS_QUERY_REQ_FLAGS_PATH_LAST PORT_TS_QUERY_REQ_FLAGS_PATH_RX + __le16 port_id; + u8 unused_0[2]; +}; + +/* hwrm_port_ts_query_output (size:192b/24B) */ +struct hwrm_port_ts_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 ptp_msg_ts; + __le16 ptp_msg_seqid; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_port_phy_qcaps_input (size:192b/24B) */ +struct hwrm_port_phy_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_phy_qcaps_output (size:192b/24B) */ +struct hwrm_port_phy_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfcUL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 2 + u8 port_cnt; + #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_4 + __le16 supported_speeds_force_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL + #define 
PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL + __le16 supported_speeds_auto_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL + __le16 supported_speeds_eee_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL + __le32 tx_lpi_timer_low; + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0 + #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL + #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24 + __le32 valid_tx_lpi_timer_high; + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0 + #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL + #define PORT_PHY_QCAPS_RESP_VALID_SFT 24 +}; + +/* hwrm_port_phy_i2c_write_input (size:832b/104B) */ +struct hwrm_port_phy_i2c_write_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET 0x1UL + __le16 port_id; + u8 i2c_slave_addr; + u8 unused_0; + __le16 page_number; + __le16 page_offset; + u8 data_length; + u8 unused_1[7]; + __le32 data[16]; +}; + +/* hwrm_port_phy_i2c_write_output (size:128b/16B) */ +struct hwrm_port_phy_i2c_write_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_phy_i2c_read_input (size:320b/40B) */ +struct hwrm_port_phy_i2c_read_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 
resp_addr; + __le32 flags; + __le32 enables; + #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL + __le16 port_id; + u8 i2c_slave_addr; + u8 unused_0; + __le16 page_number; + __le16 page_offset; + u8 data_length; + u8 unused_1[7]; +}; + +/* hwrm_port_phy_i2c_read_output (size:640b/80B) */ +struct hwrm_port_phy_i2c_read_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 data[16]; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_led_cfg_input (size:512b/64B) */ +struct hwrm_port_led_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL + __le16 port_id; + u8 num_leds; + u8 rsvd; + u8 led0_id; + u8 led0_state; + #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL + #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT + u8 led0_color; + #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL + #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER + u8 unused_0; + __le16 led0_blink_on; + __le16 led0_blink_off; + u8 led0_group_id; + u8 rsvd0; + u8 led1_id; + u8 led1_state; + #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL + #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT + u8 led1_color; + #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL + #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER + u8 unused_1; + __le16 led1_blink_on; + __le16 led1_blink_off; + u8 
led1_group_id; + u8 rsvd1; + u8 led2_id; + u8 led2_state; + #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL + #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT + u8 led2_color; + #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL + #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER + u8 unused_2; + __le16 led2_blink_on; + __le16 led2_blink_off; + u8 led2_group_id; + u8 rsvd2; + u8 led3_id; + u8 led3_state; + #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL + #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT + u8 led3_color; + #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL + #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER + u8 unused_3; + __le16 led3_blink_on; + __le16 led3_blink_off; + u8 led3_group_id; + u8 rsvd3; +}; + +/* hwrm_port_led_cfg_output (size:128b/16B) */ +struct hwrm_port_led_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_led_qcfg_input (size:192b/24B) */ +struct hwrm_port_led_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_led_qcfg_output (size:448b/56B) */ +struct hwrm_port_led_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_leds; + u8 led0_id; + u8 led0_type; + #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL + #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL + #define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID + u8 led0_state; + #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL + #define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL + #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL + #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL + #define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT + u8 led0_color; + #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL + #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL + #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL + #define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER + u8 unused_0; + __le16 led0_blink_on; + __le16 led0_blink_off; + u8 led0_group_id; + u8 led1_id; + u8 led1_type; + #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL + #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL + #define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID + u8 led1_state; + #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL + #define 
PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL + #define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL + #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL + #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL + #define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT + u8 led1_color; + #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL + #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL + #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL + #define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER + u8 unused_1; + __le16 led1_blink_on; + __le16 led1_blink_off; + u8 led1_group_id; + u8 led2_id; + u8 led2_type; + #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL + #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL + #define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID + u8 led2_state; + #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL + #define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL + #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL + #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL + #define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT + u8 led2_color; + #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL + #define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL + #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL + #define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER + u8 unused_2; + __le16 led2_blink_on; + __le16 led2_blink_off; + u8 led2_group_id; + u8 led3_id; + u8 led3_type; + #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL + #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL + #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID + u8 led3_state; + #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL + #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL + #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL + #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL + #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT + u8 led3_color; + #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL + #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL + #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL + #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER + u8 unused_3; + __le16 led3_blink_on; + __le16 led3_blink_off; + u8 led3_group_id; + u8 unused_4[6]; + u8 valid; +}; + +/* hwrm_port_led_qcaps_input (size:192b/24B) */ +struct hwrm_port_led_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_led_qcaps_output (size:384b/48B) */ +struct hwrm_port_led_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_leds; + u8 unused[3]; + u8 led0_id; + u8 led0_type; + #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID + u8 led0_group_id; + u8 unused_0; + __le16 led0_state_caps; + #define 
PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led0_color_caps; + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led1_id; + u8 led1_type; + #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID + u8 led1_group_id; + u8 unused_1; + __le16 led1_state_caps; + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led1_color_caps; + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led2_id; + u8 led2_type; + #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID + u8 led2_group_id; + u8 unused_2; + __le16 led2_state_caps; + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led2_color_caps; + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led3_id; + u8 led3_type; + #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID + u8 led3_group_id; + u8 unused_3; + __le16 led3_state_caps; + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led3_color_caps; + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 unused_4[3]; + u8 valid; +}; + +/* hwrm_queue_qportcfg_input (size:192b/24B) */ +struct hwrm_queue_qportcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL + #define 
QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX + __le16 port_id; + u8 drv_qmap_cap; + #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL + #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED 0x1UL + #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED + u8 unused_0; +}; + +/* hwrm_queue_qportcfg_output (size:256b/32B) */ +struct hwrm_queue_qportcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 max_configurable_queues; + u8 max_configurable_lossless_queues; + u8 queue_cfg_allowed; + u8 queue_cfg_info; + #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL + u8 queue_pfcenable_cfg_allowed; + u8 queue_pri2cos_cfg_allowed; + u8 queue_cos2bw_cfg_allowed; + u8 queue_id0; + u8 queue_id0_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN + u8 queue_id1; + u8 queue_id1_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN + u8 queue_id2; + u8 queue_id2_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN + u8 queue_id3; + u8 queue_id3_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN + u8 queue_id4; + u8 queue_id4_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + 
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN + u8 queue_id5; + u8 queue_id5_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN + u8 queue_id6; + u8 queue_id6_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN + u8 queue_id7; + u8 queue_id7_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN + u8 valid; +}; + +/* hwrm_queue_qcfg_input (size:192b/24B) */ +struct hwrm_queue_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX + __le32 queue_id; +}; + +/* hwrm_queue_qcfg_output (size:128b/16B) */ +struct hwrm_queue_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 queue_len; + u8 service_profile; + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN + u8 queue_cfg_info; + #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL + u8 unused_0; + u8 valid; +}; + +/* hwrm_queue_cfg_input (size:320b/40B) */ +struct hwrm_queue_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL + #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0 + #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL + #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR + __le32 
enables; + #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL + #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL + __le32 queue_id; + __le32 dflt_len; + u8 service_profile; + #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN + u8 unused_0[7]; +}; + +/* hwrm_queue_cfg_output (size:128b/16B) */ +struct hwrm_queue_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */ +struct hwrm_queue_pfcenable_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */ +struct hwrm_queue_pfcenable_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */ +struct hwrm_queue_pfcenable_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL + __le16 port_id; + u8 unused_0[2]; +}; + +/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */ +struct hwrm_queue_pfcenable_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */ +struct hwrm_queue_pri2cos_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL + u8 port_id; + u8 unused_0[3]; +}; + +/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */ +struct hwrm_queue_pri2cos_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 pri0_cos_queue_id; + u8 pri1_cos_queue_id; + u8 pri2_cos_queue_id; + u8 pri3_cos_queue_id; + u8 pri4_cos_queue_id; + u8 pri5_cos_queue_id; + u8 pri6_cos_queue_id; + u8 pri7_cos_queue_id; + u8 queue_cfg_info; + #define 
QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */ +struct hwrm_queue_pri2cos_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0 + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL + __le32 enables; + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL + u8 port_id; + u8 pri0_cos_queue_id; + u8 pri1_cos_queue_id; + u8 pri2_cos_queue_id; + u8 pri3_cos_queue_id; + u8 pri4_cos_queue_id; + u8 pri5_cos_queue_id; + u8 pri6_cos_queue_id; + u8 pri7_cos_queue_id; + u8 unused_0[7]; +}; + +/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */ +struct hwrm_queue_pri2cos_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */ +struct hwrm_queue_cos2bw_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */ +struct hwrm_queue_cos2bw_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 queue_id0; + u8 unused_0; + __le16 unused_1; + __le32 queue_id0_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id0_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id0_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id0_pri_lvl; + u8 queue_id0_bw_weight; + u8 queue_id1; + __le32 queue_id1_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id1_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id1_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id1_pri_lvl; + u8 queue_id1_bw_weight; + u8 queue_id2; + __le32 queue_id2_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id2_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id2_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id2_pri_lvl; + u8 queue_id2_bw_weight; + u8 queue_id3; + __le32 queue_id3_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id3_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id3_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id3_pri_lvl; + u8 queue_id3_bw_weight; + u8 queue_id4; + __le32 queue_id4_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id4_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id4_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id4_pri_lvl; + u8 queue_id4_bw_weight; + u8 queue_id5; + __le32 queue_id5_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id5_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id5_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id5_pri_lvl; + u8 queue_id5_bw_weight; + u8 queue_id6; + __le32 queue_id6_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id6_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id6_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id6_pri_lvl; + u8 queue_id6_bw_weight; + u8 queue_id7; + __le32 queue_id7_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id7_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id7_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id7_pri_lvl; + u8 queue_id7_bw_weight; + u8 unused_2[4]; + u8 valid; +}; + +/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */ +struct hwrm_queue_cos2bw_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL + __le16 port_id; + u8 queue_id0; + u8 unused_0; + __le32 queue_id0_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id0_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28) + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id0_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id0_pri_lvl; + u8 queue_id0_bw_weight; + u8 queue_id1; + __le32 queue_id1_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id1_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id1_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id1_pri_lvl; + u8 queue_id1_bw_weight; + u8 queue_id2; + __le32 queue_id2_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id2_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id2_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id2_pri_lvl; + u8 queue_id2_bw_weight; + u8 queue_id3; + __le32 
queue_id3_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id3_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id3_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id3_pri_lvl; + u8 queue_id3_bw_weight; + u8 queue_id4; + __le32 queue_id4_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id4_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id4_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id4_pri_lvl; + u8 queue_id4_bw_weight; + u8 queue_id5; + __le32 queue_id5_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id5_max_bw; + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id5_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id5_pri_lvl; + u8 queue_id5_bw_weight; + u8 queue_id6; + __le32 queue_id6_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id6_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL 
<< 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id6_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id6_pri_lvl; + u8 queue_id6_bw_weight; + u8 queue_id7; + __le32 queue_id7_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id7_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id7_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id7_pri_lvl; + u8 queue_id7_bw_weight; + u8 unused_1[5]; +}; + +/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */ +struct hwrm_queue_cos2bw_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */ +struct hwrm_queue_dscp_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 port_id; + u8 unused_0[7]; +}; + +/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */ +struct hwrm_queue_dscp_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_dscp_bits; + u8 unused_0; + __le16 max_entries; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */ +struct hwrm_queue_dscp2pri_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + u8 port_id; + u8 unused_0; + __le16 dest_data_buffer_size; + u8 unused_1[4]; +}; + +/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */ +struct hwrm_queue_dscp2pri_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 entry_cnt; + u8 default_pri; + u8 unused_0[4]; + u8 valid; +}; + +/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */ +struct hwrm_queue_dscp2pri_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le32 flags; + #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL + __le32 enables; + #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL + u8 port_id; + u8 default_pri; + __le16 entry_cnt; + u8 unused_0[4]; +}; + +/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */ +struct hwrm_queue_dscp2pri_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_alloc_input (size:192b/24B) */ +struct hwrm_vnic_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL + u8 unused_0[4]; +}; + +/* hwrm_vnic_alloc_output (size:128b/16B) */ +struct hwrm_vnic_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 vnic_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_vnic_free_input (size:192b/24B) */ +struct hwrm_vnic_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + u8 unused_0[4]; +}; + +/* hwrm_vnic_free_output (size:128b/16B) */ +struct hwrm_vnic_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_cfg_input (size:320b/40B) */ +struct hwrm_vnic_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL + #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL + #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL + #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL + #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL + #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL + #define 
VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL + __le32 enables; + #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL + #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL + #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL + #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL + #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL + #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL + #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL + __le16 vnic_id; + __le16 dflt_ring_grp; + __le16 rss_rule; + __le16 cos_rule; + __le16 lb_rule; + __le16 mru; + __le16 default_rx_ring_id; + __le16 default_cmpl_ring_id; +}; + +/* hwrm_vnic_cfg_output (size:128b/16B) */ +struct hwrm_vnic_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_qcfg_input (size:256b/32B) */ +struct hwrm_vnic_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define VNIC_QCFG_REQ_ENABLES_VF_ID_VALID 0x1UL + __le32 vnic_id; + __le16 vf_id; + u8 unused_0[6]; +}; + +/* hwrm_vnic_qcfg_output (size:256b/32B) */ +struct hwrm_vnic_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 dflt_ring_grp; + __le16 rss_rule; + __le16 cos_rule; + __le16 lb_rule; + __le16 mru; + u8 unused_0[2]; + __le32 flags; + #define VNIC_QCFG_RESP_FLAGS_DEFAULT 0x1UL + #define VNIC_QCFG_RESP_FLAGS_VLAN_STRIP_MODE 0x2UL + #define VNIC_QCFG_RESP_FLAGS_BD_STALL_MODE 0x4UL + #define VNIC_QCFG_RESP_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL + #define VNIC_QCFG_RESP_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL + #define VNIC_QCFG_RESP_FLAGS_RSS_DFLT_CR_MODE 0x20UL + #define VNIC_QCFG_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_vnic_qcaps_input (size:192b/24B) */ +struct hwrm_vnic_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + u8 unused_0[4]; +}; + +/* hwrm_vnic_qcaps_output (size:192b/24B) */ +struct hwrm_vnic_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mru; + u8 unused_0[2]; + __le32 flags; + #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL + #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL + #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL + #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */ +struct hwrm_vnic_tpa_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL + #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL + #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL + #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL + #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL + #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL + #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL + #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL + __le32 enables; + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL + #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL + __le16 vnic_id; + 
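[Editor's aside on hwrm_vnic_cfg_input above, not part of the patch: each optional field in the request is only acted on when its corresponding bit is set in enables. A minimal sketch, assuming the cpu_to_le16()/cpu_to_le32() helpers used elsewhere in the driver; req, vnic_id, ring_grp, ctx_id and mru are hypothetical locals.]

	/* Configure a VNIC: point it at a default ring group and an
	 * RSS context, and set the maximum receive unit. */
	req->vnic_id = cpu_to_le16 ( vnic_id );
	req->dflt_ring_grp = cpu_to_le16 ( ring_grp );
	req->rss_rule = cpu_to_le16 ( ctx_id );
	req->mru = cpu_to_le16 ( mru );
	/* Flag exactly the fields that have been filled in. */
	req->enables = cpu_to_le32 ( VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
				     VNIC_CFG_REQ_ENABLES_RSS_RULE |
				     VNIC_CFG_REQ_ENABLES_MRU );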
__le16 max_agg_segs; + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX + __le16 max_aggs; + #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX + u8 unused_0[2]; + __le32 max_agg_timer; + __le32 min_agg_len; +}; + +/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */ +struct hwrm_vnic_tpa_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_tpa_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vnic_id; + u8 unused_0[6]; +}; + +/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */ +struct hwrm_vnic_tpa_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL + #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL + #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL + #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL + #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL + #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL + #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL + #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL + __le16 max_agg_segs; + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX + __le16 max_aggs; + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX + __le32 max_agg_timer; + __le32 min_agg_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_rss_cfg_input (size:384b/48B) */ +struct hwrm_vnic_rss_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 hash_type; + #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL + __le16 vnic_id; + u8 ring_table_pair_index; + u8 hash_mode_flags; + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL + __le64 ring_grp_tbl_addr; + __le64 hash_key_tbl_addr; + __le16 
rss_ctx_idx; + u8 unused_1[6]; +}; + +/* hwrm_vnic_rss_cfg_output (size:128b/16B) */ +struct hwrm_vnic_rss_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_rss_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 rss_ctx_idx; + u8 unused_0[6]; +}; + +/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */ +struct hwrm_vnic_rss_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 hash_type; + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV4 0x1UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV4 0x2UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV4 0x4UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6 0x8UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV6 0x10UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV6 0x20UL + u8 unused_0[4]; + __le32 hash_key[10]; + u8 hash_mode_flags; + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_DEFAULT 0x1UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_4 0x2UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_2 0x4UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL + u8 unused_1[6]; + u8 valid; +}; + +/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */ +struct hwrm_vnic_plcmodes_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL + __le32 enables; + #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL + #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL + #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL + __le32 vnic_id; + __le16 jumbo_thresh; + __le16 hds_offset; + __le16 hds_threshold; + u8 unused_0[6]; +}; + +/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */ +struct hwrm_vnic_plcmodes_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_plcmodes_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_plcmodes_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + u8 unused_0[4]; +}; + +/* hwrm_vnic_plcmodes_qcfg_output (size:192b/24B) */ +struct hwrm_vnic_plcmodes_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_REGULAR_PLACEMENT 0x1UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_JUMBO_PLACEMENT 0x2UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_IPV4 0x4UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_IPV6 0x8UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_FCOE 0x10UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_ROCE 0x20UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_DFLT_VNIC 0x40UL + __le16 jumbo_thresh; + __le16 hds_offset; + __le16 hds_threshold; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 
resp_addr; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rss_cos_lb_ctx_id; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */ +struct hwrm_vnic_rss_cos_lb_ctx_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 rss_cos_lb_ctx_id; + u8 unused_0[6]; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_alloc_input (size:704b/88B) */ +struct hwrm_ring_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL + #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL + #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL + #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL + #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL + #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL + u8 ring_type; + #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL + #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL + #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL + #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL + #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL + #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ + u8 unused_0; + __le16 flags; + #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL + __le64 page_tbl_addr; + __le32 fbo; + u8 page_size; + u8 page_tbl_depth; + u8 unused_1[2]; + __le32 length; + __le16 logical_id; + __le16 cmpl_ring_id; + __le16 queue_id; + __le16 rx_buf_size; + __le16 rx_ring_id; + __le16 nq_ring_id; + __le16 ring_arb_cfg; + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0 + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ + #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL + #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4 + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8 + __le16 unused_3; + __le32 reserved3; + __le32 stat_ctx_id; + __le32 reserved4; + __le32 max_bw; + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0 + #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL + #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28) + #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST 
RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID + u8 int_mode; + #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL + #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL + #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL + #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL + #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL + u8 unused_4[3]; + __le64 cq_handle; +}; + +/* hwrm_ring_alloc_output (size:128b/16B) */ +struct hwrm_ring_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 ring_id; + __le16 logical_ring_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_ring_free_input (size:192b/24B) */ +struct hwrm_ring_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL + #define RING_FREE_REQ_RING_TYPE_TX 0x1UL + #define RING_FREE_REQ_RING_TYPE_RX 0x2UL + #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL + #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL + #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ + u8 unused_0; + __le16 ring_id; + u8 unused_1[4]; +}; + +/* hwrm_ring_free_output (size:128b/16B) */ +struct hwrm_ring_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_reset_input (size:192b/24B) */ +struct hwrm_ring_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL + #define RING_RESET_REQ_RING_TYPE_TX 0x1UL + #define RING_RESET_REQ_RING_TYPE_RX 0x2UL + #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL + u8 unused_0; + __le16 ring_id; + u8 unused_1[4]; +}; + +/* hwrm_ring_reset_output (size:128b/16B) */ +struct hwrm_ring_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */ +struct hwrm_ring_aggint_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */ +struct hwrm_ring_aggint_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 cmpl_params; + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL + __le32 nq_params; + #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL + __le16 num_cmpl_dma_aggr_min; + __le16 num_cmpl_dma_aggr_max; + __le16 num_cmpl_dma_aggr_during_int_min; + __le16 num_cmpl_dma_aggr_during_int_max; + __le16 cmpl_aggr_dma_tmr_min; + __le16 cmpl_aggr_dma_tmr_max; + __le16 cmpl_aggr_dma_tmr_during_int_min; + __le16 cmpl_aggr_dma_tmr_during_int_max; + __le16 int_lat_tmr_min_min; + __le16 int_lat_tmr_min_max; + __le16 int_lat_tmr_max_min; + 
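[Editor's aside, not part of the patch: the max_bw word in hwrm_ring_alloc_input above uses the 32-bit bandwidth encoding that recurs throughout this interface, i.e. a 28-bit value, a scale bit selecting bits or bytes, and a 3-bit unit multiplier. A minimal sketch of packing a rate given in kilobits per second, assuming the u32 type and cpu_to_le32() helper available to the driver; bnxt_pack_max_bw_kbps and req are hypothetical names.]

	/* Hypothetical helper: pack a kilobit-per-second rate into the
	 * 28-bit value / scale / unit layout used by max_bw. */
	static u32 bnxt_pack_max_bw_kbps ( u32 kbps ) {
		u32 bw;

		/* 28-bit bandwidth value */
		bw = ( ( kbps << RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT ) &
		       RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK );
		/* Scale in bits, unit multiplier of one thousand */
		bw |= ( RING_ALLOC_REQ_MAX_BW_SCALE_BITS |
			RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO );
		return bw;
	}

	/* Usage when building a ring allocation request */
	req->max_bw = cpu_to_le32 ( bnxt_pack_max_bw_kbps ( 10000000 ) );
	req->enables |= cpu_to_le32 ( RING_ALLOC_REQ_ENABLES_MAX_BW_VALID );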
__le16 int_lat_tmr_max_max; + __le16 num_cmpl_aggr_int_min; + __le16 num_cmpl_aggr_int_max; + __le16 timer_units; + u8 unused_0[1]; + u8 valid; +}; + +/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */ +struct hwrm_ring_cmpl_ring_qaggint_params_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 ring_id; + u8 unused_0[6]; +}; + +/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */ +struct hwrm_ring_cmpl_ring_qaggint_params_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flags; + #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL + #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL + __le16 num_cmpl_dma_aggr; + __le16 num_cmpl_dma_aggr_during_int; + __le16 cmpl_aggr_dma_tmr; + __le16 cmpl_aggr_dma_tmr_during_int; + __le16 int_lat_tmr_min; + __le16 int_lat_tmr_max; + __le16 num_cmpl_aggr_int; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */ +struct hwrm_ring_cmpl_ring_cfg_aggint_params_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 ring_id; + __le16 flags; + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL + __le16 num_cmpl_dma_aggr; + __le16 num_cmpl_dma_aggr_during_int; + __le16 cmpl_aggr_dma_tmr; + __le16 cmpl_aggr_dma_tmr_during_int; + __le16 int_lat_tmr_min; + __le16 int_lat_tmr_max; + __le16 num_cmpl_aggr_int; + __le16 enables; + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL + u8 unused_0[4]; +}; + +/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */ +struct hwrm_ring_cmpl_ring_cfg_aggint_params_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_grp_alloc_input (size:192b/24B) */ +struct hwrm_ring_grp_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 cr; + __le16 rr; + __le16 ar; + __le16 sc; +}; + +/* hwrm_ring_grp_alloc_output (size:128b/16B) */ +struct hwrm_ring_grp_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 ring_group_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_ring_grp_free_input (size:192b/24B) */ +struct hwrm_ring_grp_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 ring_group_id; + u8 unused_0[4]; +}; + +/* hwrm_ring_grp_free_output (size:128b/16B) */ +struct hwrm_ring_grp_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */ +struct hwrm_cfa_l2_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define 
CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK 0x30UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_SFT 4 + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 4) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE + __le32 enables; + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL + u8 l2_addr[6]; + u8 unused_0[2]; + u8 l2_addr_mask[6]; + __le16 l2_ovlan; + __le16 l2_ovlan_mask; + __le16 l2_ivlan; + __le16 l2_ivlan_mask; + u8 unused_1[2]; + u8 t_l2_addr[6]; + u8 unused_2[2]; + u8 t_l2_addr_mask[6]; + __le16 t_l2_ovlan; + __le16 t_l2_ovlan_mask; + __le16 t_l2_ivlan; + __le16 t_l2_ivlan_mask; + u8 src_type; + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG + u8 unused_3; + __le32 src_id; + u8 tunnel_type; + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define 
CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_4; + __le16 dst_id; + __le16 mirror_vnic_id; + u8 pri_hint; + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN + u8 unused_5; + __le32 unused_6; + __le64 l2_filter_id_hint; +}; + +/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_l2_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 l2_filter_id; + __le32 flow_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_l2_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 l2_filter_id; +}; + +/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_l2_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */ +struct hwrm_cfa_l2_filter_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX + #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE + __le32 enables; + #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL + #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL + __le64 l2_filter_id; + __le32 dst_id; + __le32 new_mirror_vnic_id; +}; + +/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */ +struct hwrm_cfa_l2_filter_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */ +struct hwrm_cfa_l2_set_rx_mask_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + __le32 mask; + #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL + __le64 mc_tbl_addr; + __le32 num_mc_entries; + u8 unused_0[4]; + __le64 vlan_tag_tbl_addr; + __le32 num_vlan_tags; + u8 unused_1[4]; +}; + +/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */ +struct hwrm_cfa_l2_set_rx_mask_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + 
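[Editor's aside, not part of the patch: hwrm_cfa_l2_set_rx_mask_input above selects which classes of received traffic a VNIC accepts via its mask bits. A minimal sketch of a typical selection (broadcast plus all multicast, optionally promiscuous); req, vnic_id and promisc are hypothetical locals, with cpu_to_le32() assumed from the surrounding driver.]

	u32 mask;

	/* Accept broadcast and all multicast frames on this VNIC. */
	mask = ( CFA_L2_SET_RX_MASK_REQ_MASK_BCAST |
		 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST );
	/* Optionally accept all traffic. */
	if ( promisc )
		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
	req->vnic_id = cpu_to_le32 ( vnic_id );
	req->mask = cpu_to_le32 ( mask );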
u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */ +struct hwrm_cfa_l2_set_rx_mask_cmd_err { + u8 code; + #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL + #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL + #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR + u8 unused_0[7]; +}; + +/* hwrm_cfa_vlan_antispoof_cfg_input (size:256b/32B) */ +struct hwrm_cfa_vlan_antispoof_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[2]; + __le32 num_vlan_entries; + __le64 vlan_tag_mask_tbl_addr; +}; + +/* hwrm_cfa_vlan_antispoof_cfg_output (size:128b/16B) */ +struct hwrm_cfa_vlan_antispoof_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vlan_antispoof_qcfg_input (size:256b/32B) */ +struct hwrm_cfa_vlan_antispoof_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[2]; + __le32 max_vlan_entries; + __le64 vlan_tag_mask_tbl_addr; +}; + +/* hwrm_cfa_vlan_antispoof_qcfg_output (size:128b/16B) */ +struct hwrm_cfa_vlan_antispoof_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 num_vlan_entries; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */ +struct hwrm_cfa_tunnel_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + __le32 enables; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL + __le64 l2_filter_id; + u8 l2_addr[6]; + __le16 l2_ivlan; + __le32 l3_addr[4]; + __le32 t_l3_addr[4]; + u8 l3_addr_type; + u8 t_l3_addr_type; + u8 tunnel_type; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 tunnel_flags; + 
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL + __le32 vni; + __le32 dst_vnic_id; + __le32 mirror_vnic_id; +}; + +/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_tunnel_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 tunnel_filter_id; + __le32 flow_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_tunnel_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 tunnel_filter_id; +}; + +/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_tunnel_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_redirect_tunnel_type_alloc_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dest_fid; + u8 tunnel_type; + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 flags; + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_FLAGS_MODIFY_DST 0x1UL + u8 unused_0[4]; +}; + +/* hwrm_cfa_redirect_tunnel_type_alloc_output (size:128b/16B) */ +struct hwrm_cfa_redirect_tunnel_type_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_redirect_tunnel_type_free_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dest_fid; + u8 tunnel_type; + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define 
CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_LAST CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_0[5]; +}; + +/* hwrm_cfa_redirect_tunnel_type_free_output (size:128b/16B) */ +struct hwrm_cfa_redirect_tunnel_type_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_redirect_tunnel_type_info_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 src_fid; + u8 tunnel_type; + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_LAST CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_0[5]; +}; + +/* hwrm_cfa_redirect_tunnel_type_info_output (size:128b/16B) */ +struct hwrm_cfa_redirect_tunnel_type_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 dest_fid; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */ +struct hwrm_vxlan_ipv4_hdr { + u8 ver_hlen; + #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL + #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0 + #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL + #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4 + u8 tos; + __be16 ip_id; + __be16 flags_frag_offset; + u8 ttl; + u8 protocol; + __be32 src_ip_addr; + __be32 dest_ip_addr; +}; + +/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */ +struct hwrm_vxlan_ipv6_hdr { + __be32 ver_tc_flow_label; + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK + __be16 payload_len; + u8 next_hdr; + u8 ttl; + __be32 src_ip_addr[4]; + __be32 dest_ip_addr[4]; +}; + +/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */ +struct hwrm_cfa_encap_data_vxlan { + u8 src_mac_addr[6]; + __le16 unused_0; + u8 dst_mac_addr[6]; + u8 num_vlan_tags; + u8 unused_1; + __be16 ovlan_tpid; + __be16 ovlan_tci; + __be16 ivlan_tpid; + __be16 
ivlan_tci; + __le32 l3[10]; + #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL + #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL + #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL + #define CFA_ENCAP_DATA_VXLAN_L3_LAST CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 + __be16 src_port; + __be16 dst_port; + __be32 vni; + u8 hdr_rsvd0[3]; + u8 hdr_rsvd1; + u8 hdr_flags; + u8 unused[3]; +}; + +/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */ +struct hwrm_cfa_encap_record_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + u8 encap_type; + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE + u8 unused_0[3]; + __le32 encap_data[20]; +}; + +/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */ +struct hwrm_cfa_encap_record_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 encap_record_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_encap_record_free_input (size:192b/24B) */ +struct hwrm_cfa_encap_record_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_record_id; + u8 unused_0[4]; +}; + +/* hwrm_cfa_encap_record_free_output (size:128b/16B) */ +struct hwrm_cfa_encap_record_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */ +struct hwrm_cfa_ntuple_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL + __le32 enables; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 
0x4000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL + __le64 l2_filter_id; + u8 src_macaddr[6]; + __be16 ethertype; + u8 ip_addr_type; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 + u8 ip_protocol; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP + __le16 dst_id; + __le16 mirror_vnic_id; + u8 tunnel_type; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 pri_hint; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST + __be32 src_ipaddr[4]; + __be32 src_ipaddr_mask[4]; + __be32 dst_ipaddr[4]; + __be32 dst_ipaddr_mask[4]; + __be16 src_port; + __be16 src_port_mask; + __be16 dst_port; + __be16 dst_port_mask; + __le64 ntuple_filter_id_hint; +}; + +/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_ntuple_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 ntuple_filter_id; + __le32 flow_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */ +struct hwrm_cfa_ntuple_filter_alloc_cmd_err { + u8 code; + #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR + u8 unused_0[7]; +}; + +/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_ntuple_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 ntuple_filter_id; +}; + +/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */ +struct 
hwrm_cfa_ntuple_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */ +struct hwrm_cfa_ntuple_filter_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL + u8 unused_0[4]; + __le64 ntuple_filter_id; + __le32 new_dst_id; + __le32 new_mirror_vnic_id; + __le16 new_meter_instance_id; + #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL + #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID + u8 unused_1[6]; +}; + +/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */ +struct hwrm_cfa_ntuple_filter_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_em_flow_alloc_input (size:896b/112B) */ +struct hwrm_cfa_em_flow_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_LAST CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_RX + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_BYTE_CTR 0x2UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PKT_CTR 0x4UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_DECAP 0x8UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_ENCAP 0x10UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_DROP 0x20UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_METER 0x40UL + __le32 enables; + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x2UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_TUNNEL_ID 0x4UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_MACADDR 0x10UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_OVLAN_VID 0x20UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_IVLAN_VID 0x40UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_ETHERTYPE 0x80UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_SRC_IPADDR 0x100UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_IPADDR 0x200UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x400UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x800UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_SRC_PORT 0x1000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_PORT 0x2000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_ID 0x4000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x8000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_ENCAP_RECORD_ID 0x10000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_METER_INSTANCE_ID 0x20000UL + __le64 l2_filter_id; + u8 tunnel_type; + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define 
CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_0[3]; + __le32 tunnel_id; + u8 src_macaddr[6]; + __le16 meter_instance_id; + #define CFA_EM_FLOW_ALLOC_REQ_METER_INSTANCE_ID_INVALID 0xffffUL + #define CFA_EM_FLOW_ALLOC_REQ_METER_INSTANCE_ID_LAST CFA_EM_FLOW_ALLOC_REQ_METER_INSTANCE_ID_INVALID + u8 dst_macaddr[6]; + __le16 ovlan_vid; + __le16 ivlan_vid; + __be16 ethertype; + u8 ip_addr_type; + #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_IPV6 + u8 ip_protocol; + #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_LAST CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_UDP + u8 unused_1[2]; + __be32 src_ipaddr[4]; + __be32 dst_ipaddr[4]; + __be16 src_port; + __be16 dst_port; + __le16 dst_id; + __le16 mirror_vnic_id; + __le32 encap_record_id; + u8 unused_2[4]; +}; + +/* hwrm_cfa_em_flow_alloc_output (size:192b/24B) */ +struct hwrm_cfa_em_flow_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 em_filter_id; + __le32 flow_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_em_flow_free_input (size:192b/24B) */ +struct hwrm_cfa_em_flow_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 em_filter_id; +}; + +/* hwrm_cfa_em_flow_free_output (size:128b/16B) */ +struct hwrm_cfa_em_flow_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_em_flow_cfg_input (size:384b/48B) */ +struct hwrm_cfa_em_flow_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define CFA_EM_FLOW_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL + #define CFA_EM_FLOW_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL + #define CFA_EM_FLOW_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL + u8 unused_0[4]; + __le64 em_filter_id; + __le32 new_dst_id; + __le32 new_mirror_vnic_id; + __le16 new_meter_instance_id; + #define CFA_EM_FLOW_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL + #define CFA_EM_FLOW_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_EM_FLOW_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID + u8 unused_1[6]; +}; + +/* hwrm_cfa_em_flow_cfg_output (size:128b/16B) */ +struct hwrm_cfa_em_flow_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_meter_profile_alloc_input (size:320b/40B) */ +struct hwrm_cfa_meter_profile_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_LAST CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_RX + u8 meter_type; + #define CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC2697 0x0UL + #define 
CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC2698 0x1UL + #define CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC4115 0x2UL + #define CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_LAST CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC4115 + __le16 reserved1; + __le32 reserved2; + __le32 commit_rate; + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE 0x10000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_LAST CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_BYTES + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_INVALID + __le32 commit_burst; + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE 0x10000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_LAST CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_BYTES + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID + __le32 excess_peak_rate; + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE 0x10000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_LAST CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_BYTES + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT 29 + 
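[Editor's aside, not part of the patch: the meter profile rate and burst fields above reuse the common bandwidth encoding. A minimal sketch of requesting an RFC 2697 (single-rate three-colour) meter on the receive path with a committed rate in kilobits per second; req and cir_kbps are hypothetical locals, with cpu_to_le32() assumed from the surrounding driver.]

	req->flags = CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_RX;
	req->meter_type = CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC2697;
	/* Committed information rate: value in bits, kilo units. */
	req->commit_rate =
		cpu_to_le32 ( ( cir_kbps &
				CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_MASK ) |
			      CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_BITS |
			      CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_KILO );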
#define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID + __le32 excess_peak_burst; + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE 0x10000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_LAST CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_BYTES + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID +}; + +/* hwrm_cfa_meter_profile_alloc_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 meter_profile_id; + #define CFA_METER_PROFILE_ALLOC_RESP_METER_PROFILE_ID_INVALID 0xffffUL + #define CFA_METER_PROFILE_ALLOC_RESP_METER_PROFILE_ID_LAST CFA_METER_PROFILE_ALLOC_RESP_METER_PROFILE_ID_INVALID + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_cfa_meter_profile_free_input (size:192b/24B) */ +struct hwrm_cfa_meter_profile_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_LAST CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_RX + u8 unused_0; + __le16 meter_profile_id; + #define CFA_METER_PROFILE_FREE_REQ_METER_PROFILE_ID_INVALID 0xffffUL + #define CFA_METER_PROFILE_FREE_REQ_METER_PROFILE_ID_LAST CFA_METER_PROFILE_FREE_REQ_METER_PROFILE_ID_INVALID + u8 unused_1[4]; +}; + +/* hwrm_cfa_meter_profile_free_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_meter_profile_cfg_input (size:320b/40B) */ +struct hwrm_cfa_meter_profile_cfg_input { + __le16 
req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_LAST CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_RX + u8 meter_type; + #define CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC2697 0x0UL + #define CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC2698 0x1UL + #define CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC4115 0x2UL + #define CFA_METER_PROFILE_CFG_REQ_METER_TYPE_LAST CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC4115 + __le16 meter_profile_id; + #define CFA_METER_PROFILE_CFG_REQ_METER_PROFILE_ID_INVALID 0xffffUL + #define CFA_METER_PROFILE_CFG_REQ_METER_PROFILE_ID_LAST CFA_METER_PROFILE_CFG_REQ_METER_PROFILE_ID_INVALID + __le32 reserved; + __le32 commit_rate; + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE 0x10000000UL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_LAST CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_BYTES + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_INVALID + __le32 commit_burst; + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE 0x10000000UL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_LAST CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_BYTES + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID + __le32 excess_peak_rate; + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_MASK 0xfffffffUL + #define 
CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE 0x10000000UL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_LAST CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_BYTES + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID + __le32 excess_peak_burst; + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE 0x10000000UL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_LAST CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_BYTES + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID +}; + +/* hwrm_cfa_meter_profile_cfg_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_meter_instance_alloc_input (size:192b/24B) */ +struct hwrm_cfa_meter_instance_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_LAST CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_RX + u8 unused_0; + __le16 meter_profile_id; + #define CFA_METER_INSTANCE_ALLOC_REQ_METER_PROFILE_ID_INVALID 0xffffUL + #define CFA_METER_INSTANCE_ALLOC_REQ_METER_PROFILE_ID_LAST CFA_METER_INSTANCE_ALLOC_REQ_METER_PROFILE_ID_INVALID + u8 unused_1[4]; +}; + +/* 
hwrm_cfa_meter_instance_alloc_output (size:128b/16B) */ +struct hwrm_cfa_meter_instance_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 meter_instance_id; + #define CFA_METER_INSTANCE_ALLOC_RESP_METER_INSTANCE_ID_INVALID 0xffffUL + #define CFA_METER_INSTANCE_ALLOC_RESP_METER_INSTANCE_ID_LAST CFA_METER_INSTANCE_ALLOC_RESP_METER_INSTANCE_ID_INVALID + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_cfa_meter_instance_free_input (size:192b/24B) */ +struct hwrm_cfa_meter_instance_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_LAST CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_RX + u8 unused_0; + __le16 meter_instance_id; + #define CFA_METER_INSTANCE_FREE_REQ_METER_INSTANCE_ID_INVALID 0xffffUL + #define CFA_METER_INSTANCE_FREE_REQ_METER_INSTANCE_ID_LAST CFA_METER_INSTANCE_FREE_REQ_METER_INSTANCE_ID_INVALID + u8 unused_1[4]; +}; + +/* hwrm_cfa_meter_instance_free_output (size:128b/16B) */ +struct hwrm_cfa_meter_instance_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */ +struct hwrm_cfa_decap_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL + __le32 enables; + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL + __be32 tunnel_id; + u8 tunnel_type; + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define 
CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_0; + __le16 unused_1; + u8 src_macaddr[6]; + u8 unused_2[2]; + u8 dst_macaddr[6]; + __be16 ovlan_vid; + __be16 ivlan_vid; + __be16 t_ovlan_vid; + __be16 t_ivlan_vid; + __be16 ethertype; + u8 ip_addr_type; + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 + u8 ip_protocol; + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP + __le16 unused_3; + __le32 unused_4; + __be32 src_ipaddr[4]; + __be32 dst_ipaddr[4]; + __be16 src_port; + __be16 dst_port; + __le16 dst_id; + __le16 l2_ctxt_ref_id; +}; + +/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */ +struct hwrm_cfa_decap_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 decap_filter_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_decap_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 decap_filter_id; + u8 unused_0[4]; +}; + +/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_decap_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */ +struct hwrm_cfa_flow_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1 + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3 + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 + #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL + __le16 src_fid; + __le32 tunnel_handle; + __le16 action_flags; + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL + #define 
CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL + __le16 dst_fid; + __be16 l2_rewrite_vlan_tpid; + __be16 l2_rewrite_vlan_tci; + __le16 act_meter_id; + __le16 ref_flow_handle; + __be16 ethertype; + __be16 outer_vlan_tci; + __be16 dmac[3]; + __be16 inner_vlan_tci; + __be16 smac[3]; + u8 ip_dst_mask_len; + u8 ip_src_mask_len; + __be32 ip_dst[4]; + __be32 ip_src[4]; + __be16 l4_src_port; + __be16 l4_src_port_mask; + __be16 l4_dst_port; + __be16 l4_dst_port_mask; + __be32 nat_ip_address[4]; + __be16 l2_rewrite_dmac[3]; + __be16 nat_port; + __be16 l2_rewrite_smac[3]; + u8 ip_proto; + u8 tunnel_type; + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL +}; + +/* hwrm_cfa_flow_alloc_output (size:256b/32B) */ +struct hwrm_cfa_flow_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flow_handle; + u8 unused_0[2]; + __le32 flow_id; + __le64 ext_flow_handle; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_free_input (size:256b/32B) */ +struct hwrm_cfa_flow_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flow_handle; + u8 unused_0[6]; + __le64 ext_flow_handle; +}; + +/* hwrm_cfa_flow_free_output (size:256b/32B) */ +struct hwrm_cfa_flow_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 packet; + __le64 byte; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_info_input (size:256b/32B) */ +struct hwrm_cfa_flow_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flow_handle; + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_SFT 0 + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL + u8 unused_0[6]; + __le64 ext_flow_handle; +}; + +/* hwrm_cfa_flow_info_output (size:448b/56B) */ +struct hwrm_cfa_flow_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + u8 profile; + __le16 src_fid; + __le16 dst_fid; + __le16 l2_ctxt_id; + __le64 em_info; + __le64 tcam_info; + __le64 vfp_tcam_info; + __le16 ar_id; + __le16 flow_handle; + __le32 tunnel_handle; + __le16 flow_timer; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_cfa_flow_flush_input (size:192b/24B) */ +struct hwrm_cfa_flow_flush_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 
resp_addr; + __le32 flags; + u8 unused_0[4]; +}; + +/* hwrm_cfa_flow_flush_output (size:128b/16B) */ +struct hwrm_cfa_flow_flush_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_stats_input (size:640b/80B) */ +struct hwrm_cfa_flow_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 num_flows; + __le16 flow_handle_0; + __le16 flow_handle_1; + __le16 flow_handle_2; + __le16 flow_handle_3; + __le16 flow_handle_4; + __le16 flow_handle_5; + __le16 flow_handle_6; + __le16 flow_handle_7; + __le16 flow_handle_8; + __le16 flow_handle_9; + u8 unused_0[2]; + __le32 flow_id_0; + __le32 flow_id_1; + __le32 flow_id_2; + __le32 flow_id_3; + __le32 flow_id_4; + __le32 flow_id_5; + __le32 flow_id_6; + __le32 flow_id_7; + __le32 flow_id_8; + __le32 flow_id_9; +}; + +/* hwrm_cfa_flow_stats_output (size:1408b/176B) */ +struct hwrm_cfa_flow_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 packet_0; + __le64 packet_1; + __le64 packet_2; + __le64 packet_3; + __le64 packet_4; + __le64 packet_5; + __le64 packet_6; + __le64 packet_7; + __le64 packet_8; + __le64 packet_9; + __le64 byte_0; + __le64 byte_1; + __le64 byte_2; + __le64 byte_3; + __le64 byte_4; + __le64 byte_5; + __le64 byte_6; + __le64 byte_7; + __le64 byte_8; + __le64 byte_9; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_aging_timer_reset_input (size:256b/32B) */ +struct hwrm_cfa_flow_aging_timer_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flow_handle; + u8 unused_0[6]; + __le64 ext_flow_handle; +}; + +/* hwrm_cfa_flow_aging_timer_reset_output (size:128b/16B) */ +struct hwrm_cfa_flow_aging_timer_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_aging_cfg_input (size:256b/32B) */ +struct hwrm_cfa_flow_aging_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 enables; + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_TCP_FLOW_TIMER 0x1UL + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_TCP_FIN_TIMER 0x2UL + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_UDP_FLOW_TIMER 0x4UL + u8 flags; + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH_LAST CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH_RX + u8 unused_0; + __le32 tcp_flow_timer; + __le32 tcp_fin_timer; + __le32 udp_flow_timer; +}; + +/* hwrm_cfa_flow_aging_cfg_output (size:128b/16B) */ +struct hwrm_cfa_flow_aging_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_aging_qcfg_input (size:192b/24B) */ +struct hwrm_cfa_flow_aging_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH 0x1UL + #define CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH_LAST CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH_RX + u8 unused_0[7]; +}; + +/* hwrm_cfa_flow_aging_qcfg_output (size:192b/24B) */ +struct hwrm_cfa_flow_aging_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + 
__le16 resp_len; + __le32 tcp_flow_timer; + __le32 tcp_fin_timer; + __le32 udp_flow_timer; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_flow_aging_qcaps_input (size:192b/24B) */ +struct hwrm_cfa_flow_aging_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH 0x1UL + #define CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH_LAST CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH_RX + u8 unused_0[7]; +}; + +/* hwrm_cfa_flow_aging_qcaps_output (size:256b/32B) */ +struct hwrm_cfa_flow_aging_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 max_tcp_flow_timer; + __le32 max_tcp_fin_timer; + __le32 max_udp_flow_timer; + __le32 max_aging_flows; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vf_pair_alloc_input (size:448b/56B) */ +struct hwrm_cfa_vf_pair_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_a_id; + __le16 vf_b_id; + u8 unused_0[4]; + char pair_name[32]; +}; + +/* hwrm_cfa_vf_pair_alloc_output (size:128b/16B) */ +struct hwrm_cfa_vf_pair_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vf_pair_free_input (size:384b/48B) */ +struct hwrm_cfa_vf_pair_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + char pair_name[32]; +}; + +/* hwrm_cfa_vf_pair_free_output (size:128b/16B) */ +struct hwrm_cfa_vf_pair_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vf_pair_info_input (size:448b/56B) */ +struct hwrm_cfa_vf_pair_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_VF_PAIR_INFO_REQ_FLAGS_LOOKUP_TYPE 0x1UL + __le16 vf_pair_index; + u8 unused_0[2]; + char vf_pair_name[32]; +}; + +/* hwrm_cfa_vf_pair_info_output (size:512b/64B) */ +struct hwrm_cfa_vf_pair_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 next_vf_pair_index; + __le16 vf_a_fid; + __le16 vf_a_index; + __le16 vf_b_fid; + __le16 vf_b_index; + u8 pair_state; + #define CFA_VF_PAIR_INFO_RESP_PAIR_STATE_ALLOCATED 0x1UL + #define CFA_VF_PAIR_INFO_RESP_PAIR_STATE_ACTIVE 0x2UL + #define CFA_VF_PAIR_INFO_RESP_PAIR_STATE_LAST CFA_VF_PAIR_INFO_RESP_PAIR_STATE_ACTIVE + u8 unused_0[5]; + char pair_name[32]; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_cfa_pair_alloc_input (size:576b/72B) */ +struct hwrm_cfa_pair_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 pair_mode; + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_VF2FN 0x0UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2FN 0x1UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2REP 0x2UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_PROXY 0x3UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_PFPAIR 0x4UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2FN_MOD 0x5UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2FN_MODALL 0x6UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_LAST CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2FN_MODALL + u8 unused_0; + __le16 vf_a_id; + u8 host_b_id; + u8 pf_b_id; + __le16 vf_b_id; + u8 port_id; + u8 pri; + __le16 new_pf_fid; + __le32 enables; + #define 
CFA_PAIR_ALLOC_REQ_ENABLES_Q_AB_VALID 0x1UL + #define CFA_PAIR_ALLOC_REQ_ENABLES_Q_BA_VALID 0x2UL + #define CFA_PAIR_ALLOC_REQ_ENABLES_FC_AB_VALID 0x4UL + #define CFA_PAIR_ALLOC_REQ_ENABLES_FC_BA_VALID 0x8UL + char pair_name[32]; + u8 q_ab; + u8 q_ba; + u8 fc_ab; + u8 fc_ba; + u8 unused_1[4]; +}; + +/* hwrm_cfa_pair_alloc_output (size:192b/24B) */ +struct hwrm_cfa_pair_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rx_cfa_code_a; + __le16 tx_cfa_action_a; + __le16 rx_cfa_code_b; + __le16 tx_cfa_action_b; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_pair_free_input (size:384b/48B) */ +struct hwrm_cfa_pair_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + char pair_name[32]; +}; + +/* hwrm_cfa_pair_free_output (size:128b/16B) */ +struct hwrm_cfa_pair_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_pair_info_input (size:448b/56B) */ +struct hwrm_cfa_pair_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_PAIR_INFO_REQ_FLAGS_LOOKUP_TYPE 0x1UL + #define CFA_PAIR_INFO_REQ_FLAGS_LOOKUP_REPRE 0x2UL + __le16 pair_index; + u8 pair_pfid; + u8 pair_vfid; + char pair_name[32]; +}; + +/* hwrm_cfa_pair_info_output (size:576b/72B) */ +struct hwrm_cfa_pair_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 next_pair_index; + __le16 a_fid; + u8 host_a_index; + u8 pf_a_index; + __le16 vf_a_index; + __le16 rx_cfa_code_a; + __le16 tx_cfa_action_a; + __le16 b_fid; + u8 host_b_index; + u8 pf_b_index; + __le16 vf_b_index; + __le16 rx_cfa_code_b; + __le16 tx_cfa_action_b; + u8 pair_mode; + #define CFA_PAIR_INFO_RESP_PAIR_MODE_VF2FN 0x0UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_REP2FN 0x1UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_REP2REP 0x2UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_PROXY 0x3UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_PFPAIR 0x4UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_LAST CFA_PAIR_INFO_RESP_PAIR_MODE_PFPAIR + u8 pair_state; + #define CFA_PAIR_INFO_RESP_PAIR_STATE_ALLOCATED 0x1UL + #define CFA_PAIR_INFO_RESP_PAIR_STATE_ACTIVE 0x2UL + #define CFA_PAIR_INFO_RESP_PAIR_STATE_LAST CFA_PAIR_INFO_RESP_PAIR_STATE_ACTIVE + char pair_name[32]; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */ +struct hwrm_cfa_vfr_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + __le16 reserved; + u8 unused_0[4]; + char vfr_name[32]; +}; + +/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */ +struct hwrm_cfa_vfr_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rx_cfa_code; + __le16 tx_cfa_action; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_vfr_free_input (size:384b/48B) */ +struct hwrm_cfa_vfr_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + char vfr_name[32]; +}; + +/* hwrm_cfa_vfr_free_output (size:128b/16B) */ +struct hwrm_cfa_vfr_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_redirect_query_tunnel_type_input (size:192b/24B) */ +struct hwrm_cfa_redirect_query_tunnel_type_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + 
__le16 src_fid; + u8 unused_0[6]; +}; + +/* hwrm_cfa_redirect_query_tunnel_type_output (size:128b/16B) */ +struct hwrm_cfa_redirect_query_tunnel_type_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 tunnel_mask; + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_NONTUNNEL 0x1UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_VXLAN 0x2UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_NVGRE 0x4UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_L2GRE 0x8UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_IPIP 0x10UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_GENEVE 0x20UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_MPLS 0x40UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_STT 0x80UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_IPGRE 0x100UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_VXLAN_V4 0x200UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_IPGRE_V1 0x400UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_ANYTUNNEL 0x800UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_L2_ETYPE 0x1000UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */ +struct hwrm_tunnel_dst_port_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tunnel_type; + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE + u8 unused_0[7]; +}; + +/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */ +struct hwrm_tunnel_dst_port_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tunnel_dst_port_id; + __be16 tunnel_dst_port_val; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */ +struct hwrm_tunnel_dst_port_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tunnel_type; + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE + u8 unused_0; + __be16 tunnel_dst_port_val; + u8 unused_1[4]; +}; + +/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */ +struct hwrm_tunnel_dst_port_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tunnel_dst_port_id; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */ +struct hwrm_tunnel_dst_port_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tunnel_type; + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define 
TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE + u8 unused_0; + __le16 tunnel_dst_port_id; + u8 unused_1[4]; +}; + +/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */ +struct hwrm_tunnel_dst_port_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_1[7]; + u8 valid; +}; + +/* ctx_hw_stats (size:1280b/160B) */ +struct ctx_hw_stats { + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_discard_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 tpa_pkts; + __le64 tpa_bytes; + __le64 tpa_events; + __le64 tpa_aborts; +}; + +/* ctx_eng_stats (size:512b/64B) */ +struct ctx_eng_stats { + __le64 eng_bytes_in; + __le64 eng_bytes_out; + __le64 aux_bytes_in; + __le64 aux_bytes_out; + __le64 commands; + __le64 error_commands; + __le64 cce_engine_usage; + __le64 cdd_engine_usage; +}; + +/* hwrm_stat_ctx_alloc_input (size:256b/32B) */ +struct hwrm_stat_ctx_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 stats_dma_addr; + __le32 update_period_ms; + u8 stat_ctx_flags; + #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL + u8 unused_0[3]; +}; + +/* hwrm_stat_ctx_alloc_output (size:128b/16B) */ +struct hwrm_stat_ctx_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 stat_ctx_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_stat_ctx_free_input (size:192b/24B) */ +struct hwrm_stat_ctx_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 unused_0[4]; +}; + +/* hwrm_stat_ctx_free_output (size:128b/16B) */ +struct hwrm_stat_ctx_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 stat_ctx_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_stat_ctx_query_input (size:192b/24B) */ +struct hwrm_stat_ctx_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 unused_0[4]; +}; + +/* hwrm_stat_ctx_query_output (size:1408b/176B) */ +struct hwrm_stat_ctx_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_err_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_err_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 rx_agg_pkts; + __le64 rx_agg_bytes; + __le64 rx_agg_events; + __le64 rx_agg_aborts; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_stat_ctx_eng_query_input (size:192b/24B) */ +struct hwrm_stat_ctx_eng_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 unused_0[4]; +}; + +/* hwrm_stat_ctx_eng_query_output (size:640b/80B) */ +struct hwrm_stat_ctx_eng_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 
eng_bytes_in; + __le64 eng_bytes_out; + __le64 aux_bytes_in; + __le64 aux_bytes_out; + __le64 commands; + __le64 error_commands; + __le64 cce_engine_usage; + __le64 cdd_engine_usage; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */ +struct hwrm_stat_ctx_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 unused_0[4]; +}; + +/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */ +struct hwrm_stat_ctx_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_pcie_qstats_input (size:256b/32B) */ +struct hwrm_pcie_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 pcie_stat_size; + u8 unused_0[6]; + __le64 pcie_stat_host_addr; +}; + +/* hwrm_pcie_qstats_output (size:128b/16B) */ +struct hwrm_pcie_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 pcie_stat_size; + u8 unused_0[5]; + u8 valid; +}; + +/* pcie_ctx_hw_stats (size:768b/96B) */ +struct pcie_ctx_hw_stats { + __le64 pcie_pl_signal_integrity; + __le64 pcie_dl_signal_integrity; + __le64 pcie_tl_signal_integrity; + __le64 pcie_link_integrity; + __le64 pcie_tx_traffic_rate; + __le64 pcie_rx_traffic_rate; + __le64 pcie_tx_dllp_statistics; + __le64 pcie_rx_dllp_statistics; + __le64 pcie_equalization_time; + __le32 pcie_ltssm_histogram[4]; + __le64 pcie_recovery_histogram; +}; + +/* hwrm_fw_reset_input (size:192b/24B) */ +struct hwrm_fw_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 embedded_proc_type; + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT + u8 selfrst_status; + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL + #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE + u8 host_idx; + u8 flags; + #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL + u8 unused_0[4]; +}; + +/* hwrm_fw_reset_output (size:128b/16B) */ +struct hwrm_fw_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 selfrst_status; + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL + #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_fw_qstatus_input (size:192b/24B) */ +struct hwrm_fw_qstatus_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 embedded_proc_type; + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL + #define 
FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP + u8 unused_0[7]; +}; + +/* hwrm_fw_qstatus_output (size:128b/16B) */ +struct hwrm_fw_qstatus_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 selfrst_status; + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL + #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_fw_set_time_input (size:256b/32B) */ +struct hwrm_fw_set_time_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 year; + #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL + #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN + u8 month; + u8 day; + u8 hour; + u8 minute; + u8 second; + u8 unused_0; + __le16 millisecond; + __le16 zone; + #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL + #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL + #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN + u8 unused_1[4]; +}; + +/* hwrm_fw_set_time_output (size:128b/16B) */ +struct hwrm_fw_set_time_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fw_get_time_input (size:128b/16B) */ +struct hwrm_fw_get_time_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_fw_get_time_output (size:192b/24B) */ +struct hwrm_fw_get_time_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 year; + #define FW_GET_TIME_RESP_YEAR_UNKNOWN 0x0UL + #define FW_GET_TIME_RESP_YEAR_LAST FW_GET_TIME_RESP_YEAR_UNKNOWN + u8 month; + u8 day; + u8 hour; + u8 minute; + u8 second; + u8 unused_0; + __le16 millisecond; + __le16 zone; + #define FW_GET_TIME_RESP_ZONE_UTC 0x0UL + #define FW_GET_TIME_RESP_ZONE_UNKNOWN 0xffffUL + #define FW_GET_TIME_RESP_ZONE_LAST FW_GET_TIME_RESP_ZONE_UNKNOWN + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_struct_hdr (size:128b/16B) */ +struct hwrm_struct_hdr { + __le16 struct_id; + #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL + #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL + #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL + #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL + #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL + #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL + #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL + #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL + #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL + #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL + #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL + #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_RSS_V2 + __le16 len; + u8 version; + u8 count; + __le16 subtype; + __le16 next_offset; + #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL + u8 unused_0[6]; +}; + +/* hwrm_struct_data_dcbx_ets (size:256b/32B) */ +struct hwrm_struct_data_dcbx_ets { + u8 destination; + #define STRUCT_DATA_DCBX_ETS_DESTINATION_CONFIGURATION 0x1UL + #define STRUCT_DATA_DCBX_ETS_DESTINATION_RECOMMMENDATION 0x2UL + 
#define STRUCT_DATA_DCBX_ETS_DESTINATION_LAST STRUCT_DATA_DCBX_ETS_DESTINATION_RECOMMMENDATION + u8 max_tcs; + __le16 unused1; + u8 pri0_to_tc_map; + u8 pri1_to_tc_map; + u8 pri2_to_tc_map; + u8 pri3_to_tc_map; + u8 pri4_to_tc_map; + u8 pri5_to_tc_map; + u8 pri6_to_tc_map; + u8 pri7_to_tc_map; + u8 tc0_to_bw_map; + u8 tc1_to_bw_map; + u8 tc2_to_bw_map; + u8 tc3_to_bw_map; + u8 tc4_to_bw_map; + u8 tc5_to_bw_map; + u8 tc6_to_bw_map; + u8 tc7_to_bw_map; + u8 tc0_to_tsa_map; + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_SP 0x0UL + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_CBS 0x1UL + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_ETS 0x2UL + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_VENDOR_SPECIFIC 0xffUL + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_LAST STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_VENDOR_SPECIFIC + u8 tc1_to_tsa_map; + u8 tc2_to_tsa_map; + u8 tc3_to_tsa_map; + u8 tc4_to_tsa_map; + u8 tc5_to_tsa_map; + u8 tc6_to_tsa_map; + u8 tc7_to_tsa_map; + u8 unused_0[4]; +}; + +/* hwrm_struct_data_dcbx_pfc (size:64b/8B) */ +struct hwrm_struct_data_dcbx_pfc { + u8 pfc_priority_bitmap; + u8 max_pfc_tcs; + u8 mbc; + u8 unused_0[5]; +}; + +/* hwrm_struct_data_dcbx_app (size:64b/8B) */ +struct hwrm_struct_data_dcbx_app { + __be16 protocol_id; + u8 protocol_selector; + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT + u8 priority; + u8 valid; + u8 unused_0[3]; +}; + +/* hwrm_struct_data_dcbx_feature_state (size:64b/8B) */ +struct hwrm_struct_data_dcbx_feature_state { + u8 dcbx_mode; + #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_DISABLED 0x0UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_IEEE 0x1UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_CEE 0x2UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_LAST STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_CEE + u8 ets_state; + u8 pfc_state; + u8 app_state; + #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ENABLE_BIT_POS 0x7UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_WILLING_BIT_POS 0x6UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ADVERTISE_BIT_POS 0x5UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_LAST STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ADVERTISE_BIT_POS + u8 unused[3]; + u8 resets; + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_ETS 0x1UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_PFC 0x2UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_APP 0x4UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_STATE 0x8UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_LAST STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_STATE +}; + +/* hwrm_struct_data_lldp (size:64b/8B) */ +struct hwrm_struct_data_lldp { + u8 admin_state; + #define STRUCT_DATA_LLDP_ADMIN_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_ADMIN_STATE_TX 0x1UL + #define STRUCT_DATA_LLDP_ADMIN_STATE_RX 0x2UL + #define STRUCT_DATA_LLDP_ADMIN_STATE_ENABLE 0x3UL + #define STRUCT_DATA_LLDP_ADMIN_STATE_LAST STRUCT_DATA_LLDP_ADMIN_STATE_ENABLE + u8 port_description_state; + #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_LAST 
STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_ENABLE + u8 system_name_state; + #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_LAST STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_ENABLE + u8 system_desc_state; + #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_LAST STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_ENABLE + u8 system_cap_state; + #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_LAST STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_ENABLE + u8 mgmt_addr_state; + #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_LAST STRUCT_DATA_LLDP_MGMT_ADDR_STATE_ENABLE + u8 async_event_notification_state; + #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_LAST STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_ENABLE + u8 unused_0; +}; + +/* hwrm_struct_data_lldp_generic (size:2112b/264B) */ +struct hwrm_struct_data_lldp_generic { + u8 tlv_type; + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_CHASSIS 0x1UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT 0x2UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_SYSTEM_NAME 0x3UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_SYSTEM_DESCRIPTION 0x4UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_NAME 0x5UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_DESCRIPTION 0x6UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_LAST STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_DESCRIPTION + u8 subtype; + u8 length; + u8 unused1[5]; + __le32 tlv_value[64]; +}; + +/* hwrm_struct_data_lldp_device (size:1472b/184B) */ +struct hwrm_struct_data_lldp_device { + __le16 ttl; + u8 mgmt_addr_len; + u8 mgmt_addr_type; + u8 unused_3[4]; + __le32 mgmt_addr[8]; + __le32 system_caps; + u8 intf_num_type; + u8 mgmt_addr_oid_length; + u8 unused_4[2]; + __le32 intf_num; + u8 unused_5[4]; + __le32 mgmt_addr_oid[32]; +}; + +/* hwrm_struct_data_port_description (size:64b/8B) */ +struct hwrm_struct_data_port_description { + u8 port_id; + u8 unused_0[7]; +}; + +/* hwrm_struct_data_rss_v2 (size:128b/16B) */ +struct hwrm_struct_data_rss_v2 { + __le16 flags; + #define STRUCT_DATA_RSS_V2_FLAGS_HASH_VALID 0x1UL + __le16 rss_ctx_id; + __le16 num_ring_groups; + __le16 hash_type; + #define STRUCT_DATA_RSS_V2_HASH_TYPE_IPV4 0x1UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_TCP_IPV4 0x2UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_UDP_IPV4 0x4UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_IPV6 0x8UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_TCP_IPV6 0x10UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_UDP_IPV6 0x20UL + __le64 hash_key_ring_group_ids; +}; + +/* hwrm_struct_data_power_information (size:192b/24B) */ +struct hwrm_struct_data_power_information { + __le32 bkup_power_info_ver; + __le32 platform_bkup_power_count; + __le32 load_milli_watt; + __le32 bkup_time_milli_seconds; + __le32 bkup_power_status; + __le32 bkup_power_charge_time; +}; + +/* hwrm_fw_set_structured_data_input (size:256b/32B) */ +struct hwrm_fw_set_structured_data_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le16 data_len; + u8 hdr_cnt; + u8 
unused_0[5]; +}; + +/* hwrm_fw_set_structured_data_output (size:128b/16B) */ +struct hwrm_fw_set_structured_data_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */ +struct hwrm_fw_set_structured_data_cmd_err { + u8 code; + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID + u8 unused_0[7]; +}; + +/* hwrm_fw_get_structured_data_input (size:256b/32B) */ +struct hwrm_fw_get_structured_data_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + __le16 data_len; + __le16 structure_id; + __le16 subtype; + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL + u8 count; + u8 unused_0; +}; + +/* hwrm_fw_get_structured_data_output (size:128b/16B) */ +struct hwrm_fw_get_structured_data_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 hdr_cnt; + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */ +struct hwrm_fw_get_structured_data_cmd_err { + u8 code; + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID + u8 unused_0[7]; +}; + +/* hwrm_fw_ipc_msg_input (size:320b/40B) */ +struct hwrm_fw_ipc_msg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FW_IPC_MSG_REQ_ENABLES_COMMAND_ID 0x1UL + #define FW_IPC_MSG_REQ_ENABLES_SRC_PROCESSOR 0x2UL + #define FW_IPC_MSG_REQ_ENABLES_DATA_OFFSET 0x4UL + #define FW_IPC_MSG_REQ_ENABLES_LENGTH 0x8UL + __le16 command_id; + #define FW_IPC_MSG_REQ_COMMAND_ID_ROCE_LAG 0x1UL + #define FW_IPC_MSG_REQ_COMMAND_ID_LAST FW_IPC_MSG_REQ_COMMAND_ID_ROCE_LAG + u8 src_processor; + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_CFW 0x1UL + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_BONO 0x2UL + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_APE 0x3UL + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_KONG 0x4UL + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_LAST FW_IPC_MSG_REQ_SRC_PROCESSOR_KONG + u8 unused_0; + __le32 data_offset; + __le16 length; + u8 unused_1[2]; + __le64 opaque; +}; + +/* hwrm_fw_ipc_msg_output (size:128b/16B) */ +struct hwrm_fw_ipc_msg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fw_ipc_mailbox_input (size:256b/32B) */ +struct hwrm_fw_ipc_mailbox_input { + __le16 req_type; + __le16 
cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + u8 unused; + u8 event_id; + u8 port_id; + __le32 event_data1; + __le32 event_data2; + u8 unused_0[4]; +}; + +/* hwrm_fw_ipc_mailbox_output (size:128b/16B) */ +struct hwrm_fw_ipc_mailbox_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fw_ipc_mailbox_cmd_err (size:64b/8B) */ +struct hwrm_fw_ipc_mailbox_cmd_err { + u8 code; + #define FW_IPC_MAILBOX_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_IPC_MAILBOX_CMD_ERR_CODE_BAD_ID 0x3UL + #define FW_IPC_MAILBOX_CMD_ERR_CODE_LAST FW_IPC_MAILBOX_CMD_ERR_CODE_BAD_ID + u8 unused_0[7]; +}; + +/* hwrm_fw_health_check_input (size:128b/16B) */ +struct hwrm_fw_health_check_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_fw_health_check_output (size:128b/16B) */ +struct hwrm_fw_health_check_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 fw_status; + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SBI_BOOTED 0x1UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SBI_MISMATCH 0x2UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SRT_BOOTED 0x4UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SRT_MISMATCH 0x8UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_CRT_BOOTED 0x10UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_CRT_MISMATCH 0x20UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SECOND_RT 0x40UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_fw_sync_input (size:192b/24B) */ +struct hwrm_fw_sync_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 sync_action; + #define FW_SYNC_REQ_SYNC_ACTION_SYNC_SBI 0x1UL + #define FW_SYNC_REQ_SYNC_ACTION_SYNC_SRT 0x2UL + #define FW_SYNC_REQ_SYNC_ACTION_SYNC_CRT 0x4UL + #define FW_SYNC_REQ_SYNC_ACTION_ACTION 0x80000000UL + u8 unused_0[4]; +}; + +/* hwrm_fw_sync_output (size:128b/16B) */ +struct hwrm_fw_sync_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 sync_status; + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_MASK 0xffUL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_SFT 0 + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_SUCCESS 0x0UL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_IN_PROGRESS 0x1UL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_TIMEOUT 0x2UL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_GENERAL 0x3UL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_LAST FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_GENERAL + #define FW_SYNC_RESP_SYNC_STATUS_SYNC_ERR 0x40000000UL + #define FW_SYNC_RESP_SYNC_STATUS_SYNC_COMPLETE 0x80000000UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_exec_fwd_resp_input (size:1024b/128B) */ +struct hwrm_exec_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_request[26]; + __le16 encap_resp_target_id; + u8 unused_0[6]; +}; + +/* hwrm_exec_fwd_resp_output (size:128b/16B) */ +struct hwrm_exec_fwd_resp_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_reject_fwd_resp_input (size:1024b/128B) */ +struct hwrm_reject_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_request[26]; + __le16 encap_resp_target_id; + u8 unused_0[6]; +}; + +/* hwrm_reject_fwd_resp_output (size:128b/16B) */ +struct hwrm_reject_fwd_resp_output { + __le16 error_code; + __le16 req_type; + 
__le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fwd_resp_input (size:1024b/128B) */ +struct hwrm_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 encap_resp_target_id; + __le16 encap_resp_cmpl_ring; + __le16 encap_resp_len; + u8 unused_0; + u8 unused_1; + __le64 encap_resp_addr; + __le32 encap_resp[24]; +}; + +/* hwrm_fwd_resp_output (size:128b/16B) */ +struct hwrm_fwd_resp_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */ +struct hwrm_fwd_async_event_cmpl_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 encap_async_event_target_id; + u8 unused_0[6]; + __le32 encap_async_event_cmpl[4]; +}; + +/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */ +struct hwrm_fwd_async_event_cmpl_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_temp_monitor_query_input (size:128b/16B) */ +struct hwrm_temp_monitor_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_temp_monitor_query_output (size:128b/16B) */ +struct hwrm_temp_monitor_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 temp; + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_wol_filter_alloc_input (size:512b/64B) */ +struct hwrm_wol_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL + #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL + #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL + #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL + #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL + #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL + __le16 port_id; + u8 wol_type; + #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL + #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL + #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL + #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID + u8 unused_0[5]; + u8 mac_address[6]; + __le16 pattern_offset; + __le16 pattern_buf_size; + __le16 pattern_mask_size; + u8 unused_1[4]; + __le64 pattern_buf_addr; + __le64 pattern_mask_addr; +}; + +/* hwrm_wol_filter_alloc_output (size:128b/16B) */ +struct hwrm_wol_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 wol_filter_id; + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_wol_filter_free_input (size:256b/32B) */ +struct hwrm_wol_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL + __le32 enables; + #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL + __le16 port_id; + u8 wol_filter_id; + u8 unused_0[5]; +}; + +/* hwrm_wol_filter_free_output (size:128b/16B) */ +struct hwrm_wol_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_wol_filter_qcfg_input (size:448b/56B) */ +struct hwrm_wol_filter_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 
target_id; + __le64 resp_addr; + __le16 port_id; + __le16 handle; + u8 unused_0[4]; + __le64 pattern_buf_addr; + __le16 pattern_buf_size; + u8 unused_1[6]; + __le64 pattern_mask_addr; + __le16 pattern_mask_size; + u8 unused_2[6]; +}; + +/* hwrm_wol_filter_qcfg_output (size:256b/32B) */ +struct hwrm_wol_filter_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 next_handle; + u8 wol_filter_id; + u8 wol_type; + #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL + #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL + #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL + #define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID + __le32 unused_0; + u8 mac_address[6]; + __le16 pattern_offset; + __le16 pattern_size; + __le16 pattern_mask_size; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_wol_reason_qcfg_input (size:320b/40B) */ +struct hwrm_wol_reason_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; + __le64 wol_pkt_buf_addr; + __le16 wol_pkt_buf_size; + u8 unused_1[6]; +}; + +/* hwrm_wol_reason_qcfg_output (size:128b/16B) */ +struct hwrm_wol_reason_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 wol_filter_id; + u8 wol_reason; + #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL + #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL + #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL + #define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID + u8 wol_pkt_len; + u8 unused_0[4]; + u8 valid; +}; + +/* hwrm_dbg_read_direct_input (size:256b/32B) */ +struct hwrm_dbg_read_direct_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 read_addr; + __le32 read_len32; +}; + +/* hwrm_dbg_read_direct_output (size:128b/16B) */ +struct hwrm_dbg_read_direct_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_write_direct_input (size:448b/56B) */ +struct hwrm_dbg_write_direct_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 write_addr; + __le32 write_len32; + __le32 write_data[8]; +}; + +/* hwrm_dbg_write_direct_output (size:128b/16B) */ +struct hwrm_dbg_write_direct_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_read_indirect_input (size:320b/40B) */ +struct hwrm_dbg_read_indirect_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_dest_addr_len; + u8 indirect_access_type; + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L2 0x0UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L3L4 0x1UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L2 0x2UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L3L4 0x3UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_STAT_CTXS 0x4UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_L2_TCAM 0x5UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_L2_TCAM 0x6UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_IPV6_SUBNET_TCAM 0x7UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_IPV6_SUBNET_TCAM 0x8UL + #define 
DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_SRC_PROPERTIES_TCAM 0x9UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_SRC_PROPERTIES_TCAM 0xaUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_VEB_LOOKUP_TCAM 0xbUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_PROFILE_LOOKUP_TCAM 0xcUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_PROFILE_LOOKUP_TCAM 0xdUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_LOOKUP_TCAM 0xeUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_LOOKUP_TCAM 0xfUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MHB 0x10UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_PCIE_GBL 0x11UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MULTI_HOST_SOC 0x12UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_LAST DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MULTI_HOST_SOC + u8 unused_0[3]; + __le32 start_index; + __le32 num_of_entries; +}; + +/* hwrm_dbg_read_indirect_output (size:128b/16B) */ +struct hwrm_dbg_read_indirect_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_write_indirect_input (size:512b/64B) */ +struct hwrm_dbg_write_indirect_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 indirect_access_type; + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L2 0x0UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L3L4 0x1UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L2 0x2UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L3L4 0x3UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_STAT_CTXS 0x4UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_L2_TCAM 0x5UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_L2_TCAM 0x6UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_IPV6_SUBNET_TCAM 0x7UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_IPV6_SUBNET_TCAM 0x8UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_SRC_PROPERTIES_TCAM 0x9UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_SRC_PROPERTIES_TCAM 0xaUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_VEB_LOOKUP_TCAM 0xbUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_PROFILE_LOOKUP_TCAM 0xcUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_PROFILE_LOOKUP_TCAM 0xdUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_LOOKUP_TCAM 0xeUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_LOOKUP_TCAM 0xfUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MHB 0x10UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_PCIE_GBL 0x11UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MULTI_HOST_SOC 0x12UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_LAST DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MULTI_HOST_SOC + u8 unused_0[3]; + __le32 start_index; + __le32 num_of_entries; + u8 unused_1[4]; + __le32 write_data[8]; +}; + +/* hwrm_dbg_write_indirect_output (size:128b/16B) */ +struct hwrm_dbg_write_indirect_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_dump_input (size:320b/40B) */ +struct hwrm_dbg_dump_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 handle; + u8 unused_0[4]; + __le64 
host_dbg_dump_addr; + __le64 host_dbg_dump_addr_len; +}; + +/* hwrm_dbg_dump_output (size:192b/24B) */ +struct hwrm_dbg_dump_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 nexthandle; + __le32 dbg_data_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_erase_nvm_input (size:192b/24B) */ +struct hwrm_dbg_erase_nvm_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define DBG_ERASE_NVM_REQ_FLAGS_ERASE_ALL 0x1UL + u8 unused_0[6]; +}; + +/* hwrm_dbg_erase_nvm_output (size:128b/16B) */ +struct hwrm_dbg_erase_nvm_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_cfg_input (size:192b/24B) */ +struct hwrm_dbg_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define DBG_CFG_REQ_FLAGS_UART_LOG 0x1UL + #define DBG_CFG_REQ_FLAGS_UART_LOG_SECONDARY 0x2UL + u8 unused_0[4]; +}; + +/* hwrm_dbg_cfg_output (size:128b/16B) */ +struct hwrm_dbg_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* coredump_segment_record (size:128b/16B) */ +struct coredump_segment_record { + __le16 component_id; + __le16 segment_id; + __le16 max_instances; + u8 version_hi; + u8 version_low; + u8 seg_flags; + u8 unused_0[7]; +}; + +/* hwrm_dbg_coredump_list_input (size:256b/32B) */ +struct hwrm_dbg_coredump_list_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_buf_len; + __le16 seq_no; + u8 unused_0[2]; +}; + +/* hwrm_dbg_coredump_list_output (size:128b/16B) */ +struct hwrm_dbg_coredump_list_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL + u8 unused_0; + __le16 total_segments; + __le16 data_len; + u8 unused_1; + u8 valid; +}; + +/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */ +struct hwrm_dbg_coredump_initiate_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 component_id; + __le16 segment_id; + __le16 instance; + __le16 unused_0; + u8 seg_flags; + u8 unused_1[7]; +}; + +/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */ +struct hwrm_dbg_coredump_initiate_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* coredump_data_hdr (size:128b/16B) */ +struct coredump_data_hdr { + __le32 address; + __le32 flags_length; + __le32 instance; + __le32 next_offset; +}; + +/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */ +struct hwrm_dbg_coredump_retrieve_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_buf_len; + __le32 unused_0; + __le16 component_id; + __le16 segment_id; + __le16 instance; + __le16 unused_1; + u8 seg_flags; + u8 unused_2; + __le16 unused_3; + __le32 unused_4; + __le32 seq_no; + __le32 unused_5; +}; + +/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */ +struct hwrm_dbg_coredump_retrieve_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL + u8 unused_0; + __le16 data_len; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_dbg_i2c_cmd_input 
(size:320b/40B) */ +struct hwrm_dbg_i2c_cmd_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le16 read_size; + __le16 write_size; + u8 chnl_id; + u8 options; + #define DBG_I2C_CMD_REQ_OPTIONS_10_BIT_ADDRESSING 0x1UL + #define DBG_I2C_CMD_REQ_OPTIONS_FAST_MODE 0x2UL + __le16 slave_addr; + u8 xfer_mode; + #define DBG_I2C_CMD_REQ_XFER_MODE_MASTER_READ 0x0UL + #define DBG_I2C_CMD_REQ_XFER_MODE_MASTER_WRITE 0x1UL + #define DBG_I2C_CMD_REQ_XFER_MODE_MASTER_WRITE_READ 0x2UL + #define DBG_I2C_CMD_REQ_XFER_MODE_LAST DBG_I2C_CMD_REQ_XFER_MODE_MASTER_WRITE_READ + u8 unused_1[7]; +}; + +/* hwrm_dbg_i2c_cmd_output (size:128b/16B) */ +struct hwrm_dbg_i2c_cmd_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_fw_cli_input (size:1024b/128B) */ +struct hwrm_dbg_fw_cli_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_buf_len; + __le16 cli_cmd_len; + u8 unused_0[2]; + u8 cli_cmd[96]; +}; + +/* hwrm_dbg_fw_cli_output (size:128b/16B) */ +struct hwrm_dbg_fw_cli_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 cli_data_len; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_dbg_ring_info_get_input (size:192b/24B) */ +struct hwrm_dbg_ring_info_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL + #define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL + #define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL + #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_RX + u8 unused_0[3]; + __le32 fw_ring_id; +}; + +/* hwrm_dbg_ring_info_get_output (size:192b/24B) */ +struct hwrm_dbg_ring_info_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 producer_index; + __le32 consumer_index; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_raw_write_blk_input (size:256b/32B) */ +struct hwrm_nvm_raw_write_blk_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le32 dest_addr; + __le32 len; +}; + +/* hwrm_nvm_raw_write_blk_output (size:128b/16B) */ +struct hwrm_nvm_raw_write_blk_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_read_input (size:320b/40B) */ +struct hwrm_nvm_read_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le16 dir_idx; + u8 unused_0[2]; + __le32 offset; + __le32 len; + u8 unused_1[4]; +}; + +/* hwrm_nvm_read_output (size:128b/16B) */ +struct hwrm_nvm_read_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_raw_dump_input (size:256b/32B) */ +struct hwrm_nvm_raw_dump_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 offset; + __le32 len; +}; + +/* hwrm_nvm_raw_dump_output (size:128b/16B) */ +struct hwrm_nvm_raw_dump_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */ +struct 
hwrm_nvm_get_dir_entries_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; +}; + +/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */ +struct hwrm_nvm_get_dir_entries_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_get_dir_info_input (size:128b/16B) */ +struct hwrm_nvm_get_dir_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_nvm_get_dir_info_output (size:192b/24B) */ +struct hwrm_nvm_get_dir_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 entries; + __le32 entry_length; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_write_input (size:384b/48B) */ +struct hwrm_nvm_write_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + __le16 dir_attr; + __le32 dir_data_length; + __le16 option; + __le16 flags; + #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL + __le32 dir_item_length; + __le32 unused_0; +}; + +/* hwrm_nvm_write_output (size:128b/16B) */ +struct hwrm_nvm_write_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 dir_item_length; + __le16 dir_idx; + u8 unused_0; + u8 valid; +}; + +/* hwrm_nvm_write_cmd_err (size:64b/8B) */ +struct hwrm_nvm_write_cmd_err { + u8 code; + #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL + #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL + #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_NO_SPACE + u8 unused_0[7]; +}; + +/* hwrm_nvm_modify_input (size:320b/40B) */ +struct hwrm_nvm_modify_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le16 dir_idx; + u8 unused_0[2]; + __le32 offset; + __le32 len; + u8 unused_1[4]; +}; + +/* hwrm_nvm_modify_output (size:128b/16B) */ +struct hwrm_nvm_modify_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */ +struct hwrm_nvm_find_dir_entry_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL + __le16 dir_idx; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + u8 opt_ordinal; + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0 + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT + u8 unused_0[3]; +}; + +/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */ +struct hwrm_nvm_find_dir_entry_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 dir_item_length; + __le32 dir_data_length; + __le32 fw_ver; + __le16 dir_ordinal; + __le16 dir_idx; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */ +struct hwrm_nvm_erase_dir_entry_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dir_idx; + u8 
unused_0[6]; +}; + +/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */ +struct hwrm_nvm_erase_dir_entry_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_get_dev_info_input (size:128b/16B) */ +struct hwrm_nvm_get_dev_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_nvm_get_dev_info_output (size:256b/32B) */ +struct hwrm_nvm_get_dev_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 manufacturer_id; + __le16 device_id; + __le32 sector_size; + __le32 nvram_size; + __le32 reserved_size; + __le32 available_size; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */ +struct hwrm_nvm_mod_dir_entry_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL + __le16 dir_idx; + __le16 dir_ordinal; + __le16 dir_ext; + __le16 dir_attr; + __le32 checksum; +}; + +/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */ +struct hwrm_nvm_mod_dir_entry_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_verify_update_input (size:192b/24B) */ +struct hwrm_nvm_verify_update_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + u8 unused_0[2]; +}; + +/* hwrm_nvm_verify_update_output (size:128b/16B) */ +struct hwrm_nvm_verify_update_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_install_update_input (size:192b/24B) */ +struct hwrm_nvm_install_update_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 install_type; + #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL + #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL + #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL + __le16 flags; + #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL + #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL + #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL + u8 unused_0[2]; +}; + +/* hwrm_nvm_install_update_output (size:192b/24B) */ +struct hwrm_nvm_install_update_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 installed_items; + u8 result; + #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS + u8 problem_item; + #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL + #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL + #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE + u8 reset_required; + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER + u8 unused_0[4]; + u8 valid; +}; + +/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */ +struct hwrm_nvm_install_update_cmd_err { + u8 code; + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define 
NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE + u8 unused_0[7]; +}; + +/* hwrm_nvm_flush_input (size:128b/16B) */ +struct hwrm_nvm_flush_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_nvm_flush_output (size:128b/16B) */ +struct hwrm_nvm_flush_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_flush_cmd_err (size:64b/8B) */ +struct hwrm_nvm_flush_cmd_err { + u8 code; + #define NVM_FLUSH_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_FLUSH_CMD_ERR_CODE_FAIL 0x1UL + #define NVM_FLUSH_CMD_ERR_CODE_LAST NVM_FLUSH_CMD_ERR_CODE_FAIL + u8 unused_0[7]; +}; + +/* hwrm_nvm_get_variable_input (size:320b/40B) */ +struct hwrm_nvm_get_variable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + __le16 data_len; + __le16 option_num; + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF + __le16 dimensions; + __le16 index_0; + __le16 index_1; + __le16 index_2; + __le16 index_3; + u8 flags; + #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL + u8 unused_0; +}; + +/* hwrm_nvm_get_variable_output (size:128b/16B) */ +struct hwrm_nvm_get_variable_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 data_len; + __le16 option_num; + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */ +struct hwrm_nvm_get_variable_cmd_err { + u8 code; + #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT + u8 unused_0[7]; +}; + +/* hwrm_nvm_set_variable_input (size:320b/40B) */ +struct hwrm_nvm_set_variable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le16 data_len; + __le16 option_num; + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF + __le16 dimensions; + __le16 index_0; + __le16 index_1; + __le16 index_2; + __le16 index_3; + u8 flags; + #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1 + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH + u8 unused_0; +}; + +/* 
hwrm_nvm_set_variable_output (size:128b/16B) */ +struct hwrm_nvm_set_variable_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */ +struct hwrm_nvm_set_variable_cmd_err { + u8 code; + #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR + u8 unused_0[7]; +}; + +/* hwrm_nvm_validate_option_input (size:320b/40B) */ +struct hwrm_nvm_validate_option_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le16 data_len; + __le16 option_num; + #define NVM_VALIDATE_OPTION_REQ_OPTION_NUM_RSVD_0 0x0UL + #define NVM_VALIDATE_OPTION_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_VALIDATE_OPTION_REQ_OPTION_NUM_LAST NVM_VALIDATE_OPTION_REQ_OPTION_NUM_RSVD_FFFF + __le16 dimensions; + __le16 index_0; + __le16 index_1; + __le16 index_2; + __le16 index_3; + u8 unused_0[2]; +}; + +/* hwrm_nvm_validate_option_output (size:128b/16B) */ +struct hwrm_nvm_validate_option_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 result; + #define NVM_VALIDATE_OPTION_RESP_RESULT_NOT_MATCH 0x0UL + #define NVM_VALIDATE_OPTION_RESP_RESULT_MATCH 0x1UL + #define NVM_VALIDATE_OPTION_RESP_RESULT_LAST NVM_VALIDATE_OPTION_RESP_RESULT_MATCH + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_nvm_validate_option_cmd_err (size:64b/8B) */ +struct hwrm_nvm_validate_option_cmd_err { + u8 code; + #define NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_VALIDATE_OPTION_CMD_ERR_CODE_LAST NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN + u8 unused_0[7]; +}; + +/* hwrm_nvm_factory_defaults_input (size:192b/24B) */ +struct hwrm_nvm_factory_defaults_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 mode; + #define NVM_FACTORY_DEFAULTS_REQ_MODE_RESTORE 0x0UL + #define NVM_FACTORY_DEFAULTS_REQ_MODE_CREATE 0x1UL + #define NVM_FACTORY_DEFAULTS_REQ_MODE_LAST NVM_FACTORY_DEFAULTS_REQ_MODE_CREATE + u8 unused_0[7]; +}; + +/* hwrm_nvm_factory_defaults_output (size:128b/16B) */ +struct hwrm_nvm_factory_defaults_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 result; + #define NVM_FACTORY_DEFAULTS_RESP_RESULT_CREATE_OK 0x0UL + #define NVM_FACTORY_DEFAULTS_RESP_RESULT_RESTORE_OK 0x1UL + #define NVM_FACTORY_DEFAULTS_RESP_RESULT_CREATE_ALREADY 0x2UL + #define NVM_FACTORY_DEFAULTS_RESP_RESULT_LAST NVM_FACTORY_DEFAULTS_RESP_RESULT_CREATE_ALREADY + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_nvm_factory_defaults_cmd_err (size:64b/8B) */ +struct hwrm_nvm_factory_defaults_cmd_err { + u8 code; + #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_VALID_CFG 0x1UL + #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG 0x2UL + #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_LAST NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG + u8 unused_0[7]; +}; + +/* hwrm_selftest_qlist_input (size:128b/16B) */ +struct hwrm_selftest_qlist_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_selftest_qlist_output (size:2240b/280B) */ +struct hwrm_selftest_qlist_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 
resp_len; + u8 num_tests; + u8 available_tests; + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL + u8 offline_tests; + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL + u8 unused_0; + __le16 test_timeout; + u8 unused_1[2]; + char test0_name[32]; + char test1_name[32]; + char test2_name[32]; + char test3_name[32]; + char test4_name[32]; + char test5_name[32]; + char test6_name[32]; + char test7_name[32]; + u8 unused_2[7]; + u8 valid; +}; + +/* hwrm_selftest_exec_input (size:192b/24B) */ +struct hwrm_selftest_exec_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL + #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL + #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL + #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL + u8 unused_0[7]; +}; + +/* hwrm_selftest_exec_output (size:128b/16B) */ +struct hwrm_selftest_exec_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 requested_tests; + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL + u8 test_success; + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_selftest_irq_input (size:128b/16B) */ +struct hwrm_selftest_irq_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_selftest_irq_output (size:128b/16B) */ +struct hwrm_selftest_irq_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_selftest_retrieve_serdes_data_input (size:256b/32B) */ +struct hwrm_selftest_retrieve_serdes_data_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 resp_data_addr; + __le32 resp_data_offset; + __le16 data_len; + u8 flags; + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_MASK 0x7UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_SFT 0 + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_EYE_PROJECTION 0x8UL + 
#define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL + u8 options; + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PCIE_LANE_NO_MASK 0xfUL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PCIE_LANE_NO_SFT 0 + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION 0x10UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION_HORIZONTAL (0x0UL << 4) + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION_VERTICAL (0x1UL << 4) + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION_LAST SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION_VERTICAL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE 0x20UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE_LEFT_TOP (0x0UL << 5) + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE_RIGHT_BOTTOM (0x1UL << 5) + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE_LAST SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE_RIGHT_BOTTOM + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_RSVD_MASK 0xc0UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_RSVD_SFT 6 +}; + +/* hwrm_selftest_retrieve_serdes_data_output (size:128b/16B) */ +struct hwrm_selftest_retrieve_serdes_data_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 total_data_len; + __le16 copied_data_len; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_oem_cmd_input (size:1024b/128B) */ +struct hwrm_oem_cmd_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 IANA; + __le32 unused_0; + __le32 oem_data[26]; +}; + +/* hwrm_oem_cmd_output (size:1344b/168B) */ +struct hwrm_oem_cmd_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 IANA; + __le32 unused_0; + __le32 oem_data[36]; + u8 unused_1[7]; + u8 valid; +}; + +#endif /* _BNXT_HSI_H_ */ diff --git a/src/drivers/net/eepro100.c b/src/drivers/net/eepro100.c index 1046cda39..1a802b590 100644 --- a/src/drivers/net/eepro100.c +++ b/src/drivers/net/eepro100.c @@ -93,7 +93,7 @@ FILE_LICENCE ( GPL2_OR_LATER ); /* * Debugging levels: - * - DBG() is for any errors, i.e. failed alloc_iob(), malloc_dma(), + * - DBG() is for any errors, i.e. failed alloc_iob(), malloc_phys(), * TX overflow, corrupted packets, ... * - DBG2() is for successful events, like packet received, * packet transmitted, and other general notifications. @@ -335,7 +335,7 @@ static int ifec_net_open ( struct net_device *netdev ) ifec_mdio_setup ( netdev, options ); /* Prepare MAC address w/ Individual Address Setup (ias) command.*/ - ias = malloc_dma ( sizeof ( *ias ), CB_ALIGN ); + ias = malloc_phys ( sizeof ( *ias ), CB_ALIGN ); if ( !ias ) { rc = -ENOMEM; goto error; @@ -345,7 +345,7 @@ static int ifec_net_open ( struct net_device *netdev ) memcpy ( ias->ia, netdev->ll_addr, ETH_ALEN ); /* Prepare operating parameters w/ a configure command. 
*/ - cfg = malloc_dma ( sizeof ( *cfg ), CB_ALIGN ); + cfg = malloc_phys ( sizeof ( *cfg ), CB_ALIGN ); if ( !cfg ) { rc = -ENOMEM; goto error; @@ -367,8 +367,8 @@ static int ifec_net_open ( struct net_device *netdev ) DBG ( "Failed to initiate!\n" ); goto error; } - free_dma ( ias, sizeof ( *ias ) ); - free_dma ( cfg, sizeof ( *cfg ) ); + free_phys ( ias, sizeof ( *ias ) ); + free_phys ( cfg, sizeof ( *cfg ) ); DBG2 ( "cfg " ); /* Enable rx by sending ring address to card */ @@ -381,8 +381,8 @@ static int ifec_net_open ( struct net_device *netdev ) return 0; error: - free_dma ( cfg, sizeof ( *cfg ) ); - free_dma ( ias, sizeof ( *ias ) ); + free_phys ( cfg, sizeof ( *cfg ) ); + free_phys ( ias, sizeof ( *ias ) ); ifec_free ( netdev ); ifec_reset ( netdev ); return rc; @@ -703,7 +703,7 @@ static void ifec_free ( struct net_device *netdev ) } /* free TX ring buffer */ - free_dma ( priv->tcbs, TX_RING_BYTES ); + free_phys ( priv->tcbs, TX_RING_BYTES ); priv->tcbs = NULL; } @@ -1025,7 +1025,7 @@ static int ifec_tx_setup ( struct net_device *netdev ) DBGP ( "ifec_tx_setup\n" ); /* allocate tx ring */ - priv->tcbs = malloc_dma ( TX_RING_BYTES, CB_ALIGN ); + priv->tcbs = malloc_phys ( TX_RING_BYTES, CB_ALIGN ); if ( !priv->tcbs ) { DBG ( "TX-ring allocation failed\n" ); return -ENOMEM; diff --git a/src/drivers/net/efi/nii.c b/src/drivers/net/efi/nii.c index 2d87e0c63..b9f34650e 100644 --- a/src/drivers/net/efi/nii.c +++ b/src/drivers/net/efi/nii.c @@ -788,6 +788,20 @@ static int nii_initialise_flags ( struct nii_nic *nii, unsigned int flags ) { return rc; } +/** + * Initialise UNDI with cable detection + * + * @v nii NII NIC + * @ret rc Return status code + */ +static int nii_initialise_cable ( struct nii_nic *nii ) { + unsigned int flags; + + /* Initialise UNDI */ + flags = PXE_OPFLAGS_INITIALIZE_DETECT_CABLE; + return nii_initialise_flags ( nii, flags ); +} + /** * Initialise UNDI * @@ -961,9 +975,8 @@ static int nii_transmit ( struct net_device *netdev, /* Construct parameter block */ memset ( &cpb, 0, sizeof ( cpb ) ); - cpb.FrameAddr = virt_to_bus ( iobuf->data ); + cpb.FrameAddr = ( ( intptr_t ) iobuf->data ); cpb.DataLen = iob_len ( iobuf ); - cpb.MediaheaderLen = netdev->ll_protocol->ll_header_len; /* Transmit packet */ op = NII_OP ( PXE_OPCODE_TRANSMIT, @@ -1030,7 +1043,7 @@ static void nii_poll_rx ( struct net_device *netdev ) { /* Construct parameter block */ memset ( &cpb, 0, sizeof ( cpb ) ); - cpb.BufferAddr = virt_to_bus ( nii->rxbuf->data ); + cpb.BufferAddr = ( ( intptr_t ) nii->rxbuf->data ); cpb.BufferLen = iob_tailroom ( nii->rxbuf ); /* Issue command */ @@ -1122,7 +1135,6 @@ static void nii_poll ( struct net_device *netdev ) { */ static int nii_open ( struct net_device *netdev ) { struct nii_nic *nii = netdev->priv; - unsigned int flags; int rc; /* Initialise NIC @@ -1140,15 +1152,21 @@ static int nii_open ( struct net_device *netdev ) { * presence during initialisation on links that are physically * slow to reach link-up. * - * Attempt to work around both of these problems by requesting - * cable detection at this point if any only if the driver is - * not capable of reporting link status changes at runtime via - * PXE_OPCODE_GET_STATUS. + * Attempt to work around both of these problems by first + * attempting to initialise with cable presence detection, + * then falling back to initialising without cable presence + * detection. */ - flags = ( nii->media ? 
PXE_OPFLAGS_INITIALIZE_DO_NOT_DETECT_CABLE - : PXE_OPFLAGS_INITIALIZE_DETECT_CABLE ); - if ( ( rc = nii_initialise_flags ( nii, flags ) ) != 0 ) - goto err_initialise; + if ( ( rc = nii_initialise_cable ( nii ) ) != 0 ) { + DBGC ( nii, "NII %s could not initialise with cable " + "detection: %s\n", nii->dev.name, strerror ( rc ) ); + if ( ( rc = nii_initialise ( nii ) ) != 0 ) { + DBGC ( nii, "NII %s could not initialise without " + "cable detection: %s\n", + nii->dev.name, strerror ( rc ) ); + goto err_initialise; + } + } /* Attempt to set station address */ if ( ( rc = nii_set_station_address ( nii, netdev ) ) != 0 ) { diff --git a/src/drivers/net/efi/snpnet.c b/src/drivers/net/efi/snpnet.c index 536248bca..fb5240277 100644 --- a/src/drivers/net/efi/snpnet.c +++ b/src/drivers/net/efi/snpnet.c @@ -27,6 +27,7 @@ FILE_LICENCE ( GPL2_OR_LATER ); #include #include #include +#include #include #include #include @@ -64,6 +65,12 @@ struct snp_nic { /** Maximum number of received packets per poll */ #define SNP_RX_QUOTA 4 +/** Maximum initialisation retry count */ +#define SNP_INITIALIZE_RETRY_MAX 10 + +/** Delay between each initialisation retry */ +#define SNP_INITIALIZE_RETRY_DELAY_MS 10 + /** * Format SNP MAC address (for debugging) * @@ -335,7 +342,9 @@ static int snpnet_rx_filters ( struct net_device *netdev ) { static int snpnet_open ( struct net_device *netdev ) { struct snp_nic *snp = netdev->priv; EFI_MAC_ADDRESS *mac = ( ( void * ) netdev->ll_addr ); + EFI_SIMPLE_NETWORK_MODE *mode = snp->snp->Mode; EFI_STATUS efirc; + unsigned int retry; int rc; /* Try setting MAC address (before initialising) */ @@ -346,13 +355,46 @@ static int snpnet_open ( struct net_device *netdev ) { /* Ignore error */ } - /* Initialise NIC */ - if ( ( efirc = snp->snp->Initialize ( snp->snp, 0, 0 ) ) != 0 ) { - rc = -EEFI ( efirc ); - snpnet_dump_mode ( netdev ); - DBGC ( snp, "SNP %s could not initialise: %s\n", - netdev->name, strerror ( rc ) ); - return rc; + /* Initialise NIC, retrying multiple times if link stays down */ + for ( retry = 0 ; ; ) { + + /* Initialise NIC */ + if ( ( efirc = snp->snp->Initialize ( snp->snp, + 0, 0 ) ) != 0 ) { + rc = -EEFI ( efirc ); + snpnet_dump_mode ( netdev ); + DBGC ( snp, "SNP %s could not initialise: %s\n", + netdev->name, strerror ( rc ) ); + return rc; + } + + /* Stop if we have link up (or no link detection capability) */ + if ( ( ! mode->MediaPresentSupported ) || mode->MediaPresent ) + break; + + /* Stop if we have exceeded our retry count. This is + * not a failure; it is plausible that we genuinely do + * not have link up. + */ + if ( ++retry >= SNP_INITIALIZE_RETRY_MAX ) + break; + DBGC ( snp, "SNP %s retrying initialisation (retry %d)\n", + netdev->name, retry ); + + /* Delay to allow time for link to establish */ + mdelay ( SNP_INITIALIZE_RETRY_DELAY_MS ); + + /* Shut down and retry; this is sometimes necessary in + * order to persuade the underlying SNP driver to + * actually update the link state. 
+ */ + if ( ( efirc = snp->snp->Shutdown ( snp->snp ) ) != 0 ) { + rc = -EEFI ( efirc ); + snpnet_dump_mode ( netdev ); + DBGC ( snp, "SNP %s could not shut down: %s\n", + netdev->name, strerror ( rc ) ); + return rc; + } } /* Try setting MAC address (after initialising) */ diff --git a/src/drivers/net/ena.c b/src/drivers/net/ena.c index 0f25c0beb..85da1c090 100644 --- a/src/drivers/net/ena.c +++ b/src/drivers/net/ena.c @@ -65,35 +65,59 @@ static const char * ena_direction ( unsigned int direction ) { */ /** - * Reset hardware + * Wait for reset operation to be acknowledged * * @v ena ENA device + * @v expected Expected reset state * @ret rc Return status code */ -static int ena_reset ( struct ena_nic *ena ) { +static int ena_reset_wait ( struct ena_nic *ena, uint32_t expected ) { uint32_t stat; unsigned int i; - /* Trigger reset */ - writel ( ENA_CTRL_RESET, ( ena->regs + ENA_CTRL ) ); - /* Wait for reset to complete */ for ( i = 0 ; i < ENA_RESET_MAX_WAIT_MS ; i++ ) { /* Check if device is ready */ stat = readl ( ena->regs + ENA_STAT ); - if ( stat & ENA_STAT_READY ) + if ( ( stat & ENA_STAT_RESET ) == expected ) return 0; /* Delay */ mdelay ( 1 ); } - DBGC ( ena, "ENA %p timed out waiting for reset (status %#08x)\n", - ena, stat ); + DBGC ( ena, "ENA %p timed out waiting for reset status %#08x " + "(got %#08x)\n", ena, expected, stat ); return -ETIMEDOUT; } +/** + * Reset hardware + * + * @v ena ENA device + * @ret rc Return status code + */ +static int ena_reset ( struct ena_nic *ena ) { + int rc; + + /* Trigger reset */ + writel ( ENA_CTRL_RESET, ( ena->regs + ENA_CTRL ) ); + + /* Wait for reset to take effect */ + if ( ( rc = ena_reset_wait ( ena, ENA_STAT_RESET ) ) != 0 ) + return rc; + + /* Clear reset */ + writel ( 0, ( ena->regs + ENA_CTRL ) ); + + /* Wait for reset to clear */ + if ( ( rc = ena_reset_wait ( ena, 0 ) ) != 0 ) + return rc; + + return 0; +} + /****************************************************************************** * * Admin queue @@ -164,7 +188,7 @@ static int ena_create_admin ( struct ena_nic *ena ) { int rc; /* Allocate admin completion queue */ - ena->acq.rsp = malloc_dma ( acq_len, acq_len ); + ena->acq.rsp = malloc_phys ( acq_len, acq_len ); if ( ! ena->acq.rsp ) { rc = -ENOMEM; goto err_alloc_acq; @@ -172,7 +196,7 @@ static int ena_create_admin ( struct ena_nic *ena ) { memset ( ena->acq.rsp, 0, acq_len ); /* Allocate admin queue */ - ena->aq.req = malloc_dma ( aq_len, aq_len ); + ena->aq.req = malloc_phys ( aq_len, aq_len ); if ( ! ena->aq.req ) { rc = -ENOMEM; goto err_alloc_aq; @@ -196,9 +220,9 @@ static int ena_create_admin ( struct ena_nic *ena ) { ena_clear_caps ( ena, ENA_AQ_CAPS ); ena_clear_caps ( ena, ENA_ACQ_CAPS ); - free_dma ( ena->aq.req, aq_len ); + free_phys ( ena->aq.req, aq_len ); err_alloc_aq: - free_dma ( ena->acq.rsp, acq_len ); + free_phys ( ena->acq.rsp, acq_len ); err_alloc_acq: return rc; } @@ -218,8 +242,8 @@ static void ena_destroy_admin ( struct ena_nic *ena ) { wmb(); /* Free queues */ - free_dma ( ena->aq.req, aq_len ); - free_dma ( ena->acq.rsp, acq_len ); + free_phys ( ena->aq.req, aq_len ); + free_phys ( ena->acq.rsp, acq_len ); DBGC ( ena, "ENA %p AQ and ACQ destroyed\n", ena ); } @@ -338,7 +362,7 @@ static int ena_create_sq ( struct ena_nic *ena, struct ena_sq *sq, int rc; /* Allocate submission queue entries */ - sq->sqe.raw = malloc_dma ( sq->len, ENA_ALIGN ); + sq->sqe.raw = malloc_phys ( sq->len, ENA_ALIGN ); if ( ! 
sq->sqe.raw ) { rc = -ENOMEM; goto err_alloc; @@ -375,7 +399,7 @@ static int ena_create_sq ( struct ena_nic *ena, struct ena_sq *sq, return 0; err_admin: - free_dma ( sq->sqe.raw, sq->len ); + free_phys ( sq->sqe.raw, sq->len ); err_alloc: return rc; } @@ -403,7 +427,7 @@ static int ena_destroy_sq ( struct ena_nic *ena, struct ena_sq *sq ) { return rc; /* Free submission queue entries */ - free_dma ( sq->sqe.raw, sq->len ); + free_phys ( sq->sqe.raw, sq->len ); DBGC ( ena, "ENA %p %s SQ%d destroyed\n", ena, ena_direction ( sq->direction ), sq->id ); @@ -423,7 +447,7 @@ static int ena_create_cq ( struct ena_nic *ena, struct ena_cq *cq ) { int rc; /* Allocate completion queue entries */ - cq->cqe.raw = malloc_dma ( cq->len, ENA_ALIGN ); + cq->cqe.raw = malloc_phys ( cq->len, ENA_ALIGN ); if ( ! cq->cqe.raw ) { rc = -ENOMEM; goto err_alloc; @@ -461,7 +485,7 @@ static int ena_create_cq ( struct ena_nic *ena, struct ena_cq *cq ) { return 0; err_admin: - free_dma ( cq->cqe.raw, cq->len ); + free_phys ( cq->cqe.raw, cq->len ); err_alloc: return rc; } @@ -488,7 +512,7 @@ static int ena_destroy_cq ( struct ena_nic *ena, struct ena_cq *cq ) { return rc; /* Free completion queue entries */ - free_dma ( cq->cqe.raw, cq->len ); + free_phys ( cq->cqe.raw, cq->len ); DBGC ( ena, "ENA %p CQ%d destroyed\n", ena, cq->id ); return 0; @@ -933,7 +957,7 @@ static int ena_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map registers */ - ena->regs = ioremap ( pci->membase, ENA_BAR_SIZE ); + ena->regs = pci_ioremap ( pci, pci->membase, ENA_BAR_SIZE ); if ( ! ena->regs ) { rc = -ENODEV; goto err_ioremap; diff --git a/src/drivers/net/ena.h b/src/drivers/net/ena.h index 0496fc6bd..676c5b878 100644 --- a/src/drivers/net/ena.h +++ b/src/drivers/net/ena.h @@ -66,7 +66,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); /** Device status register */ #define ENA_STAT 0x58 -#define ENA_STAT_READY 0x00000001UL /**< Ready */ +#define ENA_STAT_RESET 0x00000008UL /**< Reset in progress */ /** Admin queue entry header */ struct ena_aq_header { diff --git a/src/drivers/net/etherfabric.c b/src/drivers/net/etherfabric.c index 2cd41d4ca..e43d4336e 100644 --- a/src/drivers/net/etherfabric.c +++ b/src/drivers/net/etherfabric.c @@ -3025,7 +3025,7 @@ falcon_free_special_buffer ( void *p ) { /* We don't bother cleaning up the buffer table entries - * we're hardly limited */ - free_dma ( p, EFAB_BUF_ALIGN ); + free_phys ( p, EFAB_BUF_ALIGN ); } static void* @@ -3038,7 +3038,7 @@ falcon_alloc_special_buffer ( struct efab_nic *efab, int bytes, unsigned long dma_addr; /* Allocate the buffer, aligned on a buffer address boundary */ - buffer = malloc_dma ( bytes, EFAB_BUF_ALIGN ); + buffer = malloc_phys ( bytes, EFAB_BUF_ALIGN ); if ( ! 
buffer ) return NULL; @@ -4150,7 +4150,7 @@ efab_probe ( struct pci_device *pci ) /* Get iobase/membase */ mmio_start = pci_bar_start ( pci, PCI_BASE_ADDRESS_2 ); mmio_len = pci_bar_size ( pci, PCI_BASE_ADDRESS_2 ); - efab->membase = ioremap ( mmio_start, mmio_len ); + efab->membase = pci_ioremap ( pci, mmio_start, mmio_len ); EFAB_TRACE ( "BAR of %lx bytes at phys %lx mapped at %p\n", mmio_len, mmio_start, efab->membase ); diff --git a/src/drivers/net/exanic.c b/src/drivers/net/exanic.c index 287e14e8d..aaa6a28a1 100644 --- a/src/drivers/net/exanic.c +++ b/src/drivers/net/exanic.c @@ -800,7 +800,7 @@ static int exanic_probe ( struct pci_device *pci ) { /* Map registers */ regs_bar_start = pci_bar_start ( pci, EXANIC_REGS_BAR ); - exanic->regs = ioremap ( regs_bar_start, EXANIC_REGS_LEN ); + exanic->regs = pci_ioremap ( pci, regs_bar_start, EXANIC_REGS_LEN ); if ( ! exanic->regs ) { rc = -ENODEV; goto err_ioremap_regs; @@ -824,14 +824,14 @@ static int exanic_probe ( struct pci_device *pci ) { /* Map transmit region */ tx_bar_start = pci_bar_start ( pci, EXANIC_TX_BAR ); tx_bar_len = pci_bar_size ( pci, EXANIC_TX_BAR ); - exanic->tx = ioremap ( tx_bar_start, tx_bar_len ); + exanic->tx = pci_ioremap ( pci, tx_bar_start, tx_bar_len ); if ( ! exanic->tx ) { rc = -ENODEV; goto err_ioremap_tx; } /* Allocate transmit feedback region (shared between all ports) */ - exanic->txf = malloc_dma ( EXANIC_TXF_LEN, EXANIC_ALIGN ); + exanic->txf = malloc_phys ( EXANIC_TXF_LEN, EXANIC_ALIGN ); if ( ! exanic->txf ) { rc = -ENOMEM; goto err_alloc_txf; @@ -853,7 +853,7 @@ static int exanic_probe ( struct pci_device *pci ) { for ( i-- ; i >= 0 ; i-- ) exanic_remove_port ( exanic, i ); exanic_reset ( exanic ); - free_dma ( exanic->txf, EXANIC_TXF_LEN ); + free_phys ( exanic->txf, EXANIC_TXF_LEN ); err_alloc_txf: iounmap ( exanic->tx ); err_ioremap_tx: @@ -882,7 +882,7 @@ static void exanic_remove ( struct pci_device *pci ) { exanic_reset ( exanic ); /* Free transmit feedback region */ - free_dma ( exanic->txf, EXANIC_TXF_LEN ); + free_phys ( exanic->txf, EXANIC_TXF_LEN ); /* Unmap transmit region */ iounmap ( exanic->tx ); diff --git a/src/drivers/net/forcedeth.c b/src/drivers/net/forcedeth.c index 7f044b192..7fba08a08 100644 --- a/src/drivers/net/forcedeth.c +++ b/src/drivers/net/forcedeth.c @@ -267,7 +267,7 @@ nv_init_rings ( struct forcedeth_private *priv ) /* Allocate ring for both TX and RX */ priv->rx_ring = - malloc_dma ( sizeof(struct ring_desc) * RXTX_RING_SIZE, 32 ); + malloc_phys ( sizeof(struct ring_desc) * RXTX_RING_SIZE, 32 ); if ( ! priv->rx_ring ) goto err_malloc; priv->tx_ring = &priv->rx_ring[RX_RING_SIZE]; @@ -308,7 +308,7 @@ nv_free_rxtx_resources ( struct forcedeth_private *priv ) DBGP ( "nv_free_rxtx_resources\n" ); - free_dma ( priv->rx_ring, sizeof(struct ring_desc) * RXTX_RING_SIZE ); + free_phys ( priv->rx_ring, sizeof(struct ring_desc) * RXTX_RING_SIZE ); for ( i = 0; i < RX_RING_SIZE; i++ ) { free_iob ( priv->rx_iobuf[i] ); @@ -1762,7 +1762,7 @@ forcedeth_map_regs ( struct forcedeth_private *priv ) } rc = -ENOMEM; - ioaddr = ioremap ( addr, register_size ); + ioaddr = pci_ioremap ( priv->pci_dev, addr, register_size ); if ( ! 
ioaddr ) { DBG ( "Cannot remap MMIO\n" ); goto err_ioremap; diff --git a/src/drivers/net/icplus.c b/src/drivers/net/icplus.c index 4bed92427..acd2e2363 100644 --- a/src/drivers/net/icplus.c +++ b/src/drivers/net/icplus.c @@ -343,7 +343,7 @@ static int icplus_create_ring ( struct icplus_nic *icp, struct icplus_ring *ring struct icplus_descriptor *next; /* Allocate descriptor ring */ - ring->entry = malloc_dma ( len, ICP_ALIGN ); + ring->entry = malloc_phys ( len, ICP_ALIGN ); if ( ! ring->entry ) { rc = -ENOMEM; goto err_alloc; @@ -369,7 +369,7 @@ static int icplus_create_ring ( struct icplus_nic *icp, struct icplus_ring *ring ( virt_to_bus ( ring->entry ) + len ) ); return 0; - free_dma ( ring->entry, len ); + free_phys ( ring->entry, len ); ring->entry = NULL; err_alloc: return rc; @@ -386,7 +386,7 @@ static void icplus_destroy_ring ( struct icplus_nic *icp __unused, size_t len = ( sizeof ( ring->entry[0] ) * ICP_NUM_DESC ); /* Free descriptor ring */ - free_dma ( ring->entry, len ); + free_phys ( ring->entry, len ); ring->entry = NULL; } @@ -726,7 +726,7 @@ static int icplus_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map registers */ - icp->regs = ioremap ( pci->membase, ICP_BAR_SIZE ); + icp->regs = pci_ioremap ( pci, pci->membase, ICP_BAR_SIZE ); if ( ! icp->regs ) { rc = -ENODEV; goto err_ioremap; diff --git a/src/drivers/net/igbvf/igbvf_main.c b/src/drivers/net/igbvf/igbvf_main.c index fc7021c38..a5ed0c451 100644 --- a/src/drivers/net/igbvf/igbvf_main.c +++ b/src/drivers/net/igbvf/igbvf_main.c @@ -46,7 +46,7 @@ int igbvf_setup_tx_resources ( struct igbvf_adapter *adapter ) /* Allocate transmit descriptor ring memory. It must not cross a 64K boundary because of hardware errata #23 - so we use malloc_dma() requesting a 128 byte block that is + so we use malloc_phys() requesting a 128 byte block that is 128 byte aligned. This should guarantee that the memory allocated will not cross a 64K boundary, because 128 is an even multiple of 65536 ( 65536 / 128 == 512 ), so all possible @@ -55,7 +55,7 @@ int igbvf_setup_tx_resources ( struct igbvf_adapter *adapter ) */ adapter->tx_base = - malloc_dma ( adapter->tx_ring_size, adapter->tx_ring_size ); + malloc_phys ( adapter->tx_ring_size, adapter->tx_ring_size ); if ( ! adapter->tx_base ) { return -ENOMEM; @@ -78,7 +78,7 @@ void igbvf_free_tx_resources ( struct igbvf_adapter *adapter ) { DBG ( "igbvf_free_tx_resources\n" ); - free_dma ( adapter->tx_base, adapter->tx_ring_size ); + free_phys ( adapter->tx_base, adapter->tx_ring_size ); } /** @@ -93,7 +93,7 @@ void igbvf_free_rx_resources ( struct igbvf_adapter *adapter ) DBG ( "igbvf_free_rx_resources\n" ); - free_dma ( adapter->rx_base, adapter->rx_ring_size ); + free_phys ( adapter->rx_base, adapter->rx_ring_size ); for ( i = 0; i < NUM_RX_DESC; i++ ) { free_iob ( adapter->rx_iobuf[i] ); @@ -574,7 +574,7 @@ int igbvf_setup_rx_resources ( struct igbvf_adapter *adapter ) */ adapter->rx_base = - malloc_dma ( adapter->rx_ring_size, adapter->rx_ring_size ); + malloc_phys ( adapter->rx_ring_size, adapter->rx_ring_size ); if ( ! adapter->rx_base ) { return -ENOMEM; @@ -843,7 +843,7 @@ int igbvf_probe ( struct pci_device *pdev ) DBG ( "mmio_start: %#08lx\n", mmio_start ); DBG ( "mmio_len: %#08lx\n", mmio_len ); - adapter->hw.hw_addr = ioremap ( mmio_start, mmio_len ); + adapter->hw.hw_addr = pci_ioremap ( pdev, mmio_start, mmio_len ); DBG ( "adapter->hw.hw_addr: %p\n", adapter->hw.hw_addr ); if ( ! 
adapter->hw.hw_addr ) { diff --git a/src/drivers/net/intel.c b/src/drivers/net/intel.c index bb0b673b9..ea3ebf68d 100644 --- a/src/drivers/net/intel.c +++ b/src/drivers/net/intel.c @@ -32,7 +32,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include +#include #include #include #include "intel.h" @@ -504,7 +504,8 @@ int intel_create_ring ( struct intel_nic *intel, struct intel_ring *ring ) { * prevent any possible page-crossing errors due to hardware * errata. */ - ring->desc = malloc_dma ( ring->len, ring->len ); + ring->desc = dma_alloc ( intel->dma, &ring->map, ring->len, + ring->len ); if ( ! ring->desc ) return -ENOMEM; @@ -512,7 +513,7 @@ int intel_create_ring ( struct intel_nic *intel, struct intel_ring *ring ) { memset ( ring->desc, 0, ring->len ); /* Program ring address */ - address = virt_to_bus ( ring->desc ); + address = dma ( &ring->map, ring->desc ); writel ( ( address & 0xffffffffUL ), ( intel->regs + ring->reg + INTEL_xDBAL ) ); if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) { @@ -534,9 +535,9 @@ int intel_create_ring ( struct intel_nic *intel, struct intel_ring *ring ) { dctl |= INTEL_xDCTL_ENABLE; writel ( dctl, intel->regs + ring->reg + INTEL_xDCTL ); - DBGC ( intel, "INTEL %p ring %05x is at [%08llx,%08llx)\n", - intel, ring->reg, ( ( unsigned long long ) address ), - ( ( unsigned long long ) address + ring->len ) ); + DBGC ( intel, "INTEL %p ring %05x is at [%08lx,%08lx)\n", + intel, ring->reg, virt_to_phys ( ring->desc ), + ( virt_to_phys ( ring->desc ) + ring->len ) ); return 0; } @@ -553,7 +554,7 @@ void intel_destroy_ring ( struct intel_nic *intel, struct intel_ring *ring ) { intel_reset_ring ( intel, ring->reg ); /* Free descriptor ring */ - free_dma ( ring->desc, ring->len ); + dma_free ( &ring->map, ring->desc, ring->len ); ring->desc = NULL; ring->prod = 0; ring->cons = 0; @@ -569,14 +570,13 @@ void intel_refill_rx ( struct intel_nic *intel ) { struct io_buffer *iobuf; unsigned int rx_idx; unsigned int rx_tail; - physaddr_t address; unsigned int refilled = 0; /* Refill ring */ while ( ( intel->rx.prod - intel->rx.cons ) < INTEL_RX_FILL ) { /* Allocate I/O buffer */ - iobuf = alloc_iob ( INTEL_RX_MAX_LEN ); + iobuf = alloc_rx_iob ( INTEL_RX_MAX_LEN, intel->dma ); if ( ! 
iobuf ) { /* Wait for next refill */ break; @@ -587,16 +587,15 @@ void intel_refill_rx ( struct intel_nic *intel ) { rx = &intel->rx.desc[rx_idx]; /* Populate receive descriptor */ - address = virt_to_bus ( iobuf->data ); - intel->rx.describe ( rx, address, 0 ); + intel->rx.describe ( rx, iob_dma ( iobuf ), 0 ); /* Record I/O buffer */ assert ( intel->rx_iobuf[rx_idx] == NULL ); intel->rx_iobuf[rx_idx] = iobuf; - DBGC2 ( intel, "INTEL %p RX %d is [%llx,%llx)\n", intel, rx_idx, - ( ( unsigned long long ) address ), - ( ( unsigned long long ) address + INTEL_RX_MAX_LEN ) ); + DBGC2 ( intel, "INTEL %p RX %d is [%lx,%lx)\n", + intel, rx_idx, virt_to_phys ( iobuf->data ), + ( virt_to_phys ( iobuf->data ) + INTEL_RX_MAX_LEN ) ); refilled++; } @@ -619,9 +618,10 @@ void intel_refill_rx ( struct intel_nic *intel ) { void intel_empty_rx ( struct intel_nic *intel ) { unsigned int i; + /* Discard unused receive buffers */ for ( i = 0 ; i < INTEL_NUM_RX_DESC ; i++ ) { if ( intel->rx_iobuf[i] ) - free_iob ( intel->rx_iobuf[i] ); + free_rx_iob ( intel->rx_iobuf[i] ); intel->rx_iobuf[i] = NULL; } } @@ -742,7 +742,6 @@ int intel_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) { struct intel_descriptor *tx; unsigned int tx_idx; unsigned int tx_tail; - physaddr_t address; size_t len; /* Get next transmit descriptor */ @@ -755,9 +754,8 @@ int intel_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) { tx = &intel->tx.desc[tx_idx]; /* Populate transmit descriptor */ - address = virt_to_bus ( iobuf->data ); len = iob_len ( iobuf ); - intel->tx.describe ( tx, address, len ); + intel->tx.describe ( tx, iob_dma ( iobuf ), len ); wmb(); /* Notify card that there are packets ready to transmit */ @@ -766,9 +764,9 @@ int intel_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) { profile_stop ( &intel_vm_tx_profiler ); profile_exclude ( &intel_vm_tx_profiler ); - DBGC2 ( intel, "INTEL %p TX %d is [%llx,%llx)\n", intel, tx_idx, - ( ( unsigned long long ) address ), - ( ( unsigned long long ) address + len ) ); + DBGC2 ( intel, "INTEL %p TX %d is [%lx,%lx)\n", + intel, tx_idx, virt_to_phys ( iobuf->data ), + ( virt_to_phys ( iobuf->data ) + len ) ); return 0; } @@ -959,12 +957,17 @@ static int intel_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map registers */ - intel->regs = ioremap ( pci->membase, INTEL_BAR_SIZE ); + intel->regs = pci_ioremap ( pci, pci->membase, INTEL_BAR_SIZE ); if ( ! 
intel->regs ) { rc = -ENODEV; goto err_ioremap; } + /* Configure DMA */ + intel->dma = &pci->dma; + dma_set_mask_64bit ( intel->dma ); + netdev->dma = intel->dma; + /* Reset the NIC */ if ( ( rc = intel_reset ( intel ) ) != 0 ) goto err_reset; @@ -1022,6 +1025,12 @@ static struct pci_device_id intel_nics[] = { PCI_ROM ( 0x8086, 0x043a, "dh8900cc-f", "DH8900CC Fiber", 0 ), PCI_ROM ( 0x8086, 0x043c, "dh8900cc-b", "DH8900CC Backplane", 0 ), PCI_ROM ( 0x8086, 0x0440, "dh8900cc-s", "DH8900CC SFP", 0 ), + PCI_ROM ( 0x8086, 0x0d4c, "i219lm-11", "I219-LM (11)", INTEL_I219 ), + PCI_ROM ( 0x8086, 0x0d4d, "i219v-11", "I219-V (11)", INTEL_I219 ), + PCI_ROM ( 0x8086, 0x0d4e, "i219lm-10", "I219-LM (10)", INTEL_I219 ), + PCI_ROM ( 0x8086, 0x0d4f, "i219v-10", "I219-V (10)", INTEL_I219 ), + PCI_ROM ( 0x8086, 0x0d53, "i219lm-12", "I219-LM (12)", INTEL_I219 ), + PCI_ROM ( 0x8086, 0x0d55, "i219v-12", "I219-V (12)", INTEL_I219 ), PCI_ROM ( 0x8086, 0x1000, "82542-f", "82542 (Fiber)", 0 ), PCI_ROM ( 0x8086, 0x1001, "82543gc-f", "82543GC (Fiber)", 0 ), PCI_ROM ( 0x8086, 0x1004, "82543gc", "82543GC (Copper)", 0 ), @@ -1134,8 +1143,8 @@ static struct pci_device_id intel_nics[] = { PCI_ROM ( 0x8086, 0x1539, "i211", "I211", 0 ), PCI_ROM ( 0x8086, 0x153a, "i217lm", "I217-LM", INTEL_NO_PHY_RST ), PCI_ROM ( 0x8086, 0x153b, "i217v", "I217-V", 0 ), - PCI_ROM ( 0x8086, 0x1559, "i218v", "I218-V", 0), - PCI_ROM ( 0x8086, 0x155a, "i218lm", "I218-LM", 0), + PCI_ROM ( 0x8086, 0x1559, "i218v", "I218-V", INTEL_NO_PHY_RST ), + PCI_ROM ( 0x8086, 0x155a, "i218lm", "I218-LM", INTEL_NO_PHY_RST ), PCI_ROM ( 0x8086, 0x156f, "i219lm", "I219-LM", INTEL_I219 ), PCI_ROM ( 0x8086, 0x1570, "i219v", "I219-V", INTEL_I219 ), PCI_ROM ( 0x8086, 0x157b, "i210-2", "I210", 0 ), @@ -1158,6 +1167,12 @@ static struct pci_device_id intel_nics[] = { PCI_ROM ( 0x8086, 0x15e1, "i219lm-9", "I219-LM (9)", INTEL_I219 ), PCI_ROM ( 0x8086, 0x15e2, "i219v-9", "I219-V (9)", INTEL_I219 ), PCI_ROM ( 0x8086, 0x15e3, "i219lm-5", "I219-LM (5)", INTEL_I219 ), + PCI_ROM ( 0x8086, 0x15f4, "i219lm-15", "I219-LM (15)", INTEL_I219 ), + PCI_ROM ( 0x8086, 0x15f5, "i219v-15", "I219-V (15)", INTEL_I219 ), + PCI_ROM ( 0x8086, 0x15f9, "i219lm-14", "I219-LM (14)", INTEL_I219 ), + PCI_ROM ( 0x8086, 0x15fa, "i219v-14", "I219-V (14)", INTEL_I219 ), + PCI_ROM ( 0x8086, 0x15fb, "i219lm-13", "I219-LM (13)", INTEL_I219 ), + PCI_ROM ( 0x8086, 0x15fc, "i219v-13", "I219-V (13)", INTEL_I219 ), PCI_ROM ( 0x8086, 0x1f41, "i354", "I354", INTEL_NO_ASDE ), PCI_ROM ( 0x8086, 0x294c, "82566dc-2", "82566DC-2", 0 ), PCI_ROM ( 0x8086, 0x2e6e, "cemedia", "CE Media Processor", 0 ), diff --git a/src/drivers/net/intel.h b/src/drivers/net/intel.h index 9d740efc3..4f51a80f6 100644 --- a/src/drivers/net/intel.h +++ b/src/drivers/net/intel.h @@ -12,6 +12,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include /** Intel BAR size */ #define INTEL_BAR_SIZE ( 128 * 1024 ) @@ -212,6 +213,8 @@ union intel_receive_address { struct intel_ring { /** Descriptors */ struct intel_descriptor *desc; + /** Descriptor ring DMA mapping */ + struct dma_mapping map; /** Producer index */ unsigned int prod; /** Consumer index */ @@ -277,6 +280,8 @@ intel_init_mbox ( struct intel_mailbox *mbox, unsigned int ctrl, struct intel_nic { /** Registers */ void *regs; + /** DMA device */ + struct dma_device *dma; /** Port number (for multi-port devices) */ unsigned int port; /** Flags */ diff --git a/src/drivers/net/intelvf.c b/src/drivers/net/intelvf.c index ac6fea745..0d48b4178 100644 --- 
a/src/drivers/net/intelvf.c +++ b/src/drivers/net/intelvf.c @@ -52,14 +52,15 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); */ static void intelvf_mbox_write ( struct intel_nic *intel, const union intelvf_msg *msg ) { + const struct intelvf_msg_raw *raw = &msg->raw; unsigned int i; /* Write message */ DBGC2 ( intel, "INTEL %p sending message", intel ); - for ( i = 0 ; i < ( sizeof ( *msg ) / sizeof ( msg->dword[0] ) ) ; i++){ - DBGC2 ( intel, "%c%08x", ( i ? ':' : ' ' ), msg->dword[i] ); - writel ( msg->dword[i], ( intel->regs + intel->mbox.mem + - ( i * sizeof ( msg->dword[0] ) ) ) ); + for ( i = 0 ; i < ( sizeof ( *msg ) / sizeof ( raw->dword[0] ) ) ; i++){ + DBGC2 ( intel, "%c%08x", ( i ? ':' : ' ' ), raw->dword[i] ); + writel ( raw->dword[i], ( intel->regs + intel->mbox.mem + + ( i * sizeof ( raw->dword[0] ) ) ) ); } DBGC2 ( intel, "\n" ); } @@ -72,14 +73,15 @@ static void intelvf_mbox_write ( struct intel_nic *intel, */ static void intelvf_mbox_read ( struct intel_nic *intel, union intelvf_msg *msg ) { + struct intelvf_msg_raw *raw = &msg->raw; unsigned int i; /* Read message */ DBGC2 ( intel, "INTEL %p received message", intel ); - for ( i = 0 ; i < ( sizeof ( *msg ) / sizeof ( msg->dword[0] ) ) ; i++){ - msg->dword[i] = readl ( intel->regs + intel->mbox.mem + - ( i * sizeof ( msg->dword[0] ) ) ); - DBGC2 ( intel, "%c%08x", ( i ? ':' : ' ' ), msg->dword[i] ); + for ( i = 0 ; i < ( sizeof ( *msg ) / sizeof ( raw->dword[0] ) ) ; i++){ + raw->dword[i] = readl ( intel->regs + intel->mbox.mem + + ( i * sizeof ( raw->dword[0] ) ) ); + DBGC2 ( intel, "%c%08x", ( i ? ':' : ' ' ), raw->dword[i] ); } DBGC2 ( intel, "\n" ); } diff --git a/src/drivers/net/intelvf.h b/src/drivers/net/intelvf.h index ab404698f..ffb18e040 100644 --- a/src/drivers/net/intelvf.h +++ b/src/drivers/net/intelvf.h @@ -119,6 +119,12 @@ struct intelvf_msg_queues { uint32_t dflt; } __attribute__ (( packed )); +/** Raw mailbox message */ +struct intelvf_msg_raw { + /** Raw dwords */ + uint32_t dword[0]; +} __attribute__ (( packed )); + /** Mailbox message */ union intelvf_msg { /** Message header */ @@ -132,7 +138,7 @@ union intelvf_msg { /** Queue configuration message */ struct intelvf_msg_queues queues; /** Raw dwords */ - uint32_t dword[0]; + struct intelvf_msg_raw raw; }; /** Maximum time to wait for mailbox message diff --git a/src/drivers/net/intelx.c b/src/drivers/net/intelx.c index 47de90c88..ccf6b0648 100644 --- a/src/drivers/net/intelx.c +++ b/src/drivers/net/intelx.c @@ -32,7 +32,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include #include #include "intelx.h" @@ -405,12 +404,17 @@ static int intelx_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map registers */ - intel->regs = ioremap ( pci->membase, INTEL_BAR_SIZE ); + intel->regs = pci_ioremap ( pci, pci->membase, INTEL_BAR_SIZE ); if ( ! 
intel->regs ) { rc = -ENODEV; goto err_ioremap; } + /* Configure DMA */ + intel->dma = &pci->dma; + dma_set_mask_64bit ( intel->dma ); + netdev->dma = intel->dma; + /* Reset the NIC */ if ( ( rc = intelx_reset ( intel ) ) != 0 ) goto err_reset; @@ -475,6 +479,8 @@ static struct pci_device_id intelx_nics[] = { PCI_ROM ( 0x8086, 0x1560, "x540t1", "X540-AT2/X540-BT2 (with single port NVM)", 0 ), PCI_ROM ( 0x8086, 0x1563, "x550t2", "X550-T2", 0 ), PCI_ROM ( 0x8086, 0x15ab, "x552", "X552", 0 ), + PCI_ROM ( 0x8086, 0x15c8, "x553t", "X553/X557-AT", 0 ), + PCI_ROM ( 0x8086, 0x15ce, "x553-sfp", "X553 (SFP+)", 0 ), PCI_ROM ( 0x8086, 0x15e5, "x553", "X553", 0 ), }; diff --git a/src/drivers/net/intelxl.c b/src/drivers/net/intelxl.c index c98ba265c..ac9e37c5a 100644 --- a/src/drivers/net/intelxl.c +++ b/src/drivers/net/intelxl.c @@ -34,7 +34,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include #include #include #include "intelxl.h" @@ -87,15 +86,16 @@ static int intelxl_reset ( struct intelxl_nic *intelxl ) { static int intelxl_fetch_mac ( struct intelxl_nic *intelxl, struct net_device *netdev ) { union intelxl_receive_address mac; - uint32_t prtgl_sal; + uint32_t prtpm_sal; + uint32_t prtpm_sah; uint32_t prtgl_sah; size_t mfs; /* Read NVM-loaded address */ - prtgl_sal = readl ( intelxl->regs + INTELXL_PRTGL_SAL ); - prtgl_sah = readl ( intelxl->regs + INTELXL_PRTGL_SAH ); - mac.reg.low = cpu_to_le32 ( prtgl_sal ); - mac.reg.high = cpu_to_le32 ( prtgl_sah ); + prtpm_sal = readl ( intelxl->regs + INTELXL_PRTPM_SAL ); + prtpm_sah = readl ( intelxl->regs + INTELXL_PRTPM_SAH ); + mac.reg.low = cpu_to_le32 ( prtpm_sal ); + mac.reg.high = cpu_to_le32 ( prtpm_sah ); /* Check that address is valid */ if ( ! is_valid_ether_addr ( mac.raw ) ) { @@ -110,6 +110,7 @@ static int intelxl_fetch_mac ( struct intelxl_nic *intelxl, memcpy ( netdev->hw_addr, mac.raw, ETH_ALEN ); /* Get maximum frame size */ + prtgl_sah = readl ( intelxl->regs + INTELXL_PRTGL_SAH ); mfs = INTELXL_PRTGL_SAH_MFS_GET ( prtgl_sah ); netdev->max_pkt_len = ( mfs - 4 /* CRC */ ); @@ -134,20 +135,36 @@ int intelxl_msix_enable ( struct intelxl_nic *intelxl, struct pci_device *pci ) { int rc; + /* Map dummy target location */ + if ( ( rc = dma_map ( intelxl->dma, &intelxl->msix.map, + virt_to_phys ( &intelxl->msix.msg ), + sizeof ( intelxl->msix.msg ), DMA_RX ) ) != 0 ) { + DBGC ( intelxl, "INTELXL %p could not map MSI-X target: %s\n", + intelxl, strerror ( rc ) ); + goto err_map; + } + /* Enable MSI-X capability */ - if ( ( rc = pci_msix_enable ( pci, &intelxl->msix ) ) != 0 ) { + if ( ( rc = pci_msix_enable ( pci, &intelxl->msix.cap ) ) != 0 ) { DBGC ( intelxl, "INTELXL %p could not enable MSI-X: %s\n", intelxl, strerror ( rc ) ); - return rc; + goto err_enable; } /* Configure interrupt zero to write to dummy location */ - pci_msix_map ( &intelxl->msix, 0, virt_to_bus ( &intelxl->msg ), 0 ); + pci_msix_map ( &intelxl->msix.cap, 0, + dma ( &intelxl->msix.map, &intelxl->msix.msg ), 0 ); /* Enable dummy interrupt zero */ - pci_msix_unmask ( &intelxl->msix, 0 ); + pci_msix_unmask ( &intelxl->msix.cap, 0 ); return 0; + + pci_msix_disable ( pci, &intelxl->msix.cap ); + err_enable: + dma_unmap ( &intelxl->msix.map ); + err_map: + return rc; } /** @@ -160,10 +177,13 @@ void intelxl_msix_disable ( struct intelxl_nic *intelxl, struct pci_device *pci ) { /* Disable dummy interrupt zero */ - pci_msix_mask ( &intelxl->msix, 0 ); + pci_msix_mask ( &intelxl->msix.cap, 0 ); /* Disable MSI-X capability */ - pci_msix_disable ( pci, 
&intelxl->msix ); + pci_msix_disable ( pci, &intelxl->msix.cap ); + + /* Unmap dummy target location */ + dma_unmap ( &intelxl->msix.map ); } /****************************************************************************** @@ -195,19 +215,19 @@ static int intelxl_alloc_admin ( struct intelxl_nic *intelxl, size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC ); /* Allocate admin queue */ - admin->buf = malloc_dma ( ( buf_len + len ), INTELXL_ALIGN ); + admin->buf = dma_alloc ( intelxl->dma, &admin->map, ( buf_len + len ), + INTELXL_ALIGN ); if ( ! admin->buf ) return -ENOMEM; admin->desc = ( ( ( void * ) admin->buf ) + buf_len ); - DBGC ( intelxl, "INTELXL %p A%cQ is at [%08llx,%08llx) buf " - "[%08llx,%08llx)\n", intelxl, + DBGC ( intelxl, "INTELXL %p A%cQ is at [%08lx,%08lx) buf " + "[%08lx,%08lx)\n", intelxl, ( ( admin == &intelxl->command ) ? 'T' : 'R' ), - ( ( unsigned long long ) virt_to_bus ( admin->desc ) ), - ( ( unsigned long long ) ( virt_to_bus ( admin->desc ) + len ) ), - ( ( unsigned long long ) virt_to_bus ( admin->buf ) ), - ( ( unsigned long long ) ( virt_to_bus ( admin->buf ) + - buf_len ) ) ); + virt_to_phys ( admin->desc ), + ( virt_to_phys ( admin->desc ) + len ), + virt_to_phys ( admin->buf ), + ( virt_to_phys ( admin->buf ) + buf_len ) ); return 0; } @@ -235,7 +255,7 @@ static void intelxl_enable_admin ( struct intelxl_nic *intelxl, admin->index = 0; /* Program queue address */ - address = virt_to_bus ( admin->desc ); + address = dma ( &admin->map, admin->desc ); writel ( ( address & 0xffffffffUL ), admin_regs + regs->bal ); if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) { writel ( ( ( ( uint64_t ) address ) >> 32 ), @@ -277,7 +297,7 @@ static void intelxl_free_admin ( struct intelxl_nic *intelxl __unused, size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC ); /* Free queue */ - free_dma ( admin->buf, ( buf_len + len ) ); + dma_free ( &admin->map, admin->buf, ( buf_len + len ) ); } /** @@ -330,7 +350,7 @@ static void intelxl_admin_event_init ( struct intelxl_nic *intelxl, /* Initialise descriptor */ evt = &admin->desc[ index % INTELXL_ADMIN_NUM_DESC ]; buf = &admin->buf[ index % INTELXL_ADMIN_NUM_DESC ]; - address = virt_to_bus ( buf ); + address = dma ( &admin->map, buf ); evt->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ); evt->len = cpu_to_le16 ( sizeof ( *buf ) ); evt->params.buffer.high = cpu_to_le32 ( address >> 32 ); @@ -375,7 +395,7 @@ int intelxl_admin_command ( struct intelxl_nic *intelxl ) { /* Populate data buffer address if applicable */ if ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) { - address = virt_to_bus ( buf ); + address = dma ( &admin->map, buf ); cmd->params.buffer.high = cpu_to_le32 ( address >> 32 ); cmd->params.buffer.low = cpu_to_le32 ( address & 0xffffffffUL ); } @@ -922,16 +942,15 @@ void intelxl_close_admin ( struct intelxl_nic *intelxl ) { */ int intelxl_alloc_ring ( struct intelxl_nic *intelxl, struct intelxl_ring *ring ) { - physaddr_t address; int rc; /* Allocate descriptor ring */ - ring->desc.raw = malloc_dma ( ring->len, INTELXL_ALIGN ); + ring->desc.raw = dma_alloc ( intelxl->dma, &ring->map, ring->len, + INTELXL_ALIGN ); if ( ! 
ring->desc.raw ) { rc = -ENOMEM; goto err_alloc; } - address = virt_to_bus ( ring->desc.raw ); /* Initialise descriptor ring */ memset ( ring->desc.raw, 0, ring->len ); @@ -943,14 +962,14 @@ int intelxl_alloc_ring ( struct intelxl_nic *intelxl, ring->prod = 0; ring->cons = 0; - DBGC ( intelxl, "INTELXL %p ring %06x is at [%08llx,%08llx)\n", + DBGC ( intelxl, "INTELXL %p ring %06x is at [%08lx,%08lx)\n", intelxl, ( ring->reg + ring->tail ), - ( ( unsigned long long ) address ), - ( ( unsigned long long ) address + ring->len ) ); + virt_to_phys ( ring->desc.raw ), + ( virt_to_phys ( ring->desc.raw ) + ring->len ) ); return 0; - free_dma ( ring->desc.raw, ring->len ); + dma_free ( &ring->map, ring->desc.raw, ring->len ); err_alloc: return rc; } @@ -965,7 +984,7 @@ void intelxl_free_ring ( struct intelxl_nic *intelxl __unused, struct intelxl_ring *ring ) { /* Free descriptor ring */ - free_dma ( ring->desc.raw, ring->len ); + dma_free ( &ring->map, ring->desc.raw, ring->len ); ring->desc.raw = NULL; } @@ -1241,7 +1260,7 @@ static int intelxl_create_ring ( struct intelxl_nic *intelxl, goto err_alloc; /* Program queue context */ - address = virt_to_bus ( ring->desc.raw ); + address = dma ( &ring->map, ring->desc.raw ); if ( ( rc = ring->context ( intelxl, address ) ) != 0 ) goto err_context; @@ -1289,14 +1308,13 @@ static void intelxl_refill_rx ( struct intelxl_nic *intelxl ) { struct io_buffer *iobuf; unsigned int rx_idx; unsigned int rx_tail; - physaddr_t address; unsigned int refilled = 0; /* Refill ring */ while ( ( intelxl->rx.prod - intelxl->rx.cons ) < INTELXL_RX_FILL ) { /* Allocate I/O buffer */ - iobuf = alloc_iob ( intelxl->mfs ); + iobuf = alloc_rx_iob ( intelxl->mfs, intelxl->dma ); if ( ! iobuf ) { /* Wait for next refill */ break; @@ -1307,17 +1325,16 @@ static void intelxl_refill_rx ( struct intelxl_nic *intelxl ) { rx = &intelxl->rx.desc.rx[rx_idx].data; /* Populate receive descriptor */ - address = virt_to_bus ( iobuf->data ); - rx->address = cpu_to_le64 ( address ); + rx->address = cpu_to_le64 ( iob_dma ( iobuf ) ); rx->flags = 0; /* Record I/O buffer */ assert ( intelxl->rx_iobuf[rx_idx] == NULL ); intelxl->rx_iobuf[rx_idx] = iobuf; - DBGC2 ( intelxl, "INTELXL %p RX %d is [%llx,%llx)\n", intelxl, - rx_idx, ( ( unsigned long long ) address ), - ( ( unsigned long long ) address + intelxl->mfs ) ); + DBGC2 ( intelxl, "INTELXL %p RX %d is [%08lx,%08lx)\n", + intelxl, rx_idx, virt_to_phys ( iobuf->data ), + ( virt_to_phys ( iobuf->data ) + intelxl->mfs ) ); refilled++; } @@ -1340,7 +1357,7 @@ void intelxl_empty_rx ( struct intelxl_nic *intelxl ) { /* Discard any unused receive buffers */ for ( i = 0 ; i < INTELXL_RX_NUM_DESC ; i++ ) { if ( intelxl->rx_iobuf[i] ) - free_iob ( intelxl->rx_iobuf[i] ); + free_rx_iob ( intelxl->rx_iobuf[i] ); intelxl->rx_iobuf[i] = NULL; } } @@ -1467,7 +1484,6 @@ int intelxl_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) { struct intelxl_tx_data_descriptor *tx; unsigned int tx_idx; unsigned int tx_tail; - physaddr_t address; size_t len; /* Get next transmit descriptor */ @@ -1481,9 +1497,8 @@ int intelxl_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) { tx = &intelxl->tx.desc.tx[tx_idx].data; /* Populate transmit descriptor */ - address = virt_to_bus ( iobuf->data ); len = iob_len ( iobuf ); - tx->address = cpu_to_le64 ( address ); + tx->address = cpu_to_le64 ( iob_dma ( iobuf ) ); tx->len = cpu_to_le32 ( INTELXL_TX_DATA_LEN ( len ) ); tx->flags = cpu_to_le32 ( INTELXL_TX_DATA_DTYP | INTELXL_TX_DATA_EOP | 
INTELXL_TX_DATA_RS | INTELXL_TX_DATA_JFDI ); @@ -1492,9 +1507,9 @@ int intelxl_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) { /* Notify card that there are packets ready to transmit */ writel ( tx_tail, ( intelxl->regs + intelxl->tx.tail ) ); - DBGC2 ( intelxl, "INTELXL %p TX %d is [%llx,%llx)\n", intelxl, tx_idx, - ( ( unsigned long long ) address ), - ( ( unsigned long long ) address + len ) ); + DBGC2 ( intelxl, "INTELXL %p TX %d is [%08lx,%08lx)\n", + intelxl, tx_idx, virt_to_phys ( iobuf->data ), + ( virt_to_phys ( iobuf->data ) + len ) ); return 0; } @@ -1641,6 +1656,7 @@ static struct net_device_operations intelxl_operations = { static int intelxl_probe ( struct pci_device *pci ) { struct net_device *netdev; struct intelxl_nic *intelxl; + uint32_t pffunc_rid; uint32_t pfgen_portnum; uint32_t pflan_qalloc; int rc; @@ -1656,7 +1672,6 @@ static int intelxl_probe ( struct pci_device *pci ) { pci_set_drvdata ( pci, netdev ); netdev->dev = &pci->dev; memset ( intelxl, 0, sizeof ( *intelxl ) ); - intelxl->pf = PCI_FUNC ( pci->busdevfn ); intelxl->intr = INTELXL_PFINT_DYN_CTL0; intelxl_init_admin ( &intelxl->command, INTELXL_ADMIN_CMD, &intelxl_admin_offsets ); @@ -1673,17 +1688,24 @@ static int intelxl_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map registers */ - intelxl->regs = ioremap ( pci->membase, INTELXL_BAR_SIZE ); + intelxl->regs = pci_ioremap ( pci, pci->membase, INTELXL_BAR_SIZE ); if ( ! intelxl->regs ) { rc = -ENODEV; goto err_ioremap; } + /* Configure DMA */ + intelxl->dma = &pci->dma; + dma_set_mask_64bit ( intelxl->dma ); + netdev->dma = intelxl->dma; + /* Reset the NIC */ if ( ( rc = intelxl_reset ( intelxl ) ) != 0 ) goto err_reset; - /* Get port number and base queue number */ + /* Get function number, port number and base queue number */ + pffunc_rid = readl ( intelxl->regs + INTELXL_PFFUNC_RID ); + intelxl->pf = INTELXL_PFFUNC_RID_FUNC_NUM ( pffunc_rid ); pfgen_portnum = readl ( intelxl->regs + INTELXL_PFGEN_PORTNUM ); intelxl->port = INTELXL_PFGEN_PORTNUM_PORT_NUM ( pfgen_portnum ); pflan_qalloc = readl ( intelxl->regs + INTELXL_PFLAN_QALLOC ); diff --git a/src/drivers/net/intelxl.h b/src/drivers/net/intelxl.h index 80586cef0..a4a776d28 100644 --- a/src/drivers/net/intelxl.h +++ b/src/drivers/net/intelxl.h @@ -12,6 +12,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include struct intelxl_nic; @@ -562,6 +563,8 @@ struct intelxl_admin { struct intelxl_admin_descriptor *desc; /** Data buffers */ union intelxl_admin_buffer *buf; + /** DMA mapping */ + struct dma_mapping map; /** Queue index */ unsigned int index; @@ -866,6 +869,8 @@ struct intelxl_ring { /** Raw data */ void *raw; } desc; + /** Descriptor ring DMA mapping */ + struct dma_mapping map; /** Producer index */ unsigned int prod; /** Consumer index */ @@ -985,6 +990,11 @@ intelxl_init_ring ( struct intelxl_ring *ring, unsigned int count, size_t len, /** Time to delay for device reset, in milliseconds */ #define INTELXL_RESET_DELAY_MS 100 +/** Function Requester ID Information Register */ +#define INTELXL_PFFUNC_RID 0x09c000 +#define INTELXL_PFFUNC_RID_FUNC_NUM(x) \ + ( ( (x) >> 0 ) & 0x3 ) /**< Function number */ + /** PF Queue Allocation Register */ #define INTELXL_PFLAN_QALLOC 0x1c0400 #define INTELXL_PFLAN_QALLOC_FIRSTQ(x) \ @@ -1005,6 +1015,12 @@ intelxl_init_ring ( struct intelxl_ring *ring, unsigned int count, size_t len, #define INTELXL_PRTGL_SAH_MFS_GET(x) ( (x) >> 16 ) /**< Max frame size */ #define INTELXL_PRTGL_SAH_MFS_SET(x) ( (x) << 
16 ) /**< Max frame size */ +/** Physical Function MAC Address Low Register */ +#define INTELXL_PRTPM_SAL 0x1e4440 + +/** Physical Function MAC Address High Register */ +#define INTELXL_PRTPM_SAH 0x1e44c0 + /** Receive address */ union intelxl_receive_address { struct { @@ -1014,10 +1030,22 @@ union intelxl_receive_address { uint8_t raw[ETH_ALEN]; }; +/** MSI-X interrupt */ +struct intelxl_msix { + /** PCI capability */ + struct pci_msix cap; + /** MSI-X dummy interrupt target */ + uint32_t msg; + /** DMA mapping for dummy interrupt target */ + struct dma_mapping map; +}; + /** An Intel 40Gigabit network card */ struct intelxl_nic { /** Registers */ void *regs; + /** DMA device */ + struct dma_device *dma; /** Maximum frame size */ size_t mfs; @@ -1035,12 +1063,10 @@ struct intelxl_nic { unsigned int qset; /** Interrupt control register */ unsigned int intr; - /** MSI-X capability */ - struct pci_msix msix; - /** MSI-X dummy interrupt target */ - uint32_t msg; /** PCI Express capability offset */ unsigned int exp; + /** MSI-X interrupt */ + struct intelxl_msix msix; /** Admin command queue */ struct intelxl_admin command; diff --git a/src/drivers/net/intelxlvf.c b/src/drivers/net/intelxlvf.c index 8f76daf3d..752de7815 100644 --- a/src/drivers/net/intelxlvf.c +++ b/src/drivers/net/intelxlvf.c @@ -298,9 +298,9 @@ void intelxlvf_admin_event ( struct net_device *netdev, if ( intelxl->vret != 0 ) { DBGC ( intelxl, "INTELXL %p admin VF command %#x " "error %d\n", intelxl, vopcode, intelxl->vret ); - DBGC_HDA ( intelxl, virt_to_bus ( evt ), evt, + DBGC_HDA ( intelxl, virt_to_phys ( evt ), evt, sizeof ( *evt ) ); - DBGC_HDA ( intelxl, virt_to_bus ( buf ), buf, + DBGC_HDA ( intelxl, virt_to_phys ( buf ), buf, le16_to_cpu ( evt->len ) ); } return; @@ -314,8 +314,10 @@ void intelxlvf_admin_event ( struct net_device *netdev, default: DBGC ( intelxl, "INTELXL %p unrecognised VF event %#x:\n", intelxl, vopcode ); - DBGC_HDA ( intelxl, 0, evt, sizeof ( *evt ) ); - DBGC_HDA ( intelxl, 0, buf, le16_to_cpu ( evt->len ) ); + DBGC_HDA ( intelxl, virt_to_phys ( evt ), evt, + sizeof ( *evt ) ); + DBGC_HDA ( intelxl, virt_to_phys ( buf ), buf, + le16_to_cpu ( evt->len ) ); break; } } @@ -378,12 +380,14 @@ static int intelxlvf_admin_configure ( struct net_device *netdev ) { buf->cfg.count = cpu_to_le16 ( 1 ); buf->cfg.tx.vsi = cpu_to_le16 ( intelxl->vsi ); buf->cfg.tx.count = cpu_to_le16 ( INTELXL_TX_NUM_DESC ); - buf->cfg.tx.base = cpu_to_le64 ( virt_to_bus ( intelxl->tx.desc.raw ) ); + buf->cfg.tx.base = cpu_to_le64 ( dma ( &intelxl->tx.map, + intelxl->tx.desc.raw ) ); buf->cfg.rx.vsi = cpu_to_le16 ( intelxl->vsi ); buf->cfg.rx.count = cpu_to_le32 ( INTELXL_RX_NUM_DESC ); buf->cfg.rx.len = cpu_to_le32 ( intelxl->mfs ); buf->cfg.rx.mfs = cpu_to_le32 ( intelxl->mfs ); - buf->cfg.rx.base = cpu_to_le64 ( virt_to_bus ( intelxl->rx.desc.raw ) ); + buf->cfg.rx.base = cpu_to_le64 ( dma ( &intelxl->rx.map, + intelxl->rx.desc.raw ) ); /* Issue command */ if ( ( rc = intelxlvf_admin_command ( netdev ) ) != 0 ) @@ -612,12 +616,17 @@ static int intelxlvf_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map registers */ - intelxl->regs = ioremap ( pci->membase, INTELXLVF_BAR_SIZE ); + intelxl->regs = pci_ioremap ( pci, pci->membase, INTELXLVF_BAR_SIZE ); if ( ! 
intelxl->regs ) { rc = -ENODEV; goto err_ioremap; } + /* Configure DMA */ + intelxl->dma = &pci->dma; + dma_set_mask_64bit ( intelxl->dma ); + netdev->dma = intelxl->dma; + /* Locate PCI Express capability */ intelxl->exp = pci_find_capability ( pci, PCI_CAP_ID_EXP ); if ( ! intelxl->exp ) { diff --git a/src/drivers/net/intelxvf.c b/src/drivers/net/intelxvf.c index 2caeec27e..d50bac698 100644 --- a/src/drivers/net/intelxvf.c +++ b/src/drivers/net/intelxvf.c @@ -456,12 +456,17 @@ static int intelxvf_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map registers */ - intel->regs = ioremap ( pci->membase, INTELVF_BAR_SIZE ); + intel->regs = pci_ioremap ( pci, pci->membase, INTELVF_BAR_SIZE ); if ( ! intel->regs ) { rc = -ENODEV; goto err_ioremap; } + /* Configure DMA */ + intel->dma = &pci->dma; + dma_set_mask_64bit ( intel->dma ); + netdev->dma = intel->dma; + /* Reset the function */ intelxvf_reset ( intel ); @@ -525,6 +530,7 @@ static struct pci_device_id intelxvf_nics[] = { PCI_ROM ( 0x8086, 0x1515, "x540-vf", "X540 VF", 0 ), PCI_ROM ( 0x8086, 0x1565, "x550-vf", "X550 VF", 0 ), PCI_ROM ( 0x8086, 0x15a8, "x552-vf", "X552 VF", 0 ), + PCI_ROM ( 0x8086, 0x15c5, "x557-vf", "X557-AT2 VF", 0 ), }; /** PCI driver */ diff --git a/src/drivers/net/iphone.c b/src/drivers/net/iphone.c new file mode 100644 index 000000000..7d0eb4b64 --- /dev/null +++ b/src/drivers/net/iphone.c @@ -0,0 +1,2268 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iphone.h" + +/** @file + * + * iPhone USB Ethernet driver + * + */ + +/* Disambiguate the various error causes */ +#define EPIPE_NO_MUX __einfo_error ( EINFO_EPIPE_NO_MUX ) +#define EINFO_EPIPE_NO_MUX \ + __einfo_uniqify ( EINFO_EPIPE, 0x01, \ + "No USB multiplexer" ) +#define EINPROGRESS_PAIRING __einfo_error ( EINFO_EINPROGRESS_PAIRING ) +#define EINFO_EINPROGRESS_PAIRING \ + __einfo_uniqify ( EINFO_EINPROGRESS, 0x01, \ + "Pairing in progress" ) +#define ENOTCONN_DISABLED __einfo_error ( EINFO_ENOTCONN_DISABLED ) +#define EINFO_ENOTCONN_DISABLED \ + __einfo_uniqify ( EINFO_ENOTCONN, IPHONE_LINK_DISABLED, \ + "Personal Hotspot disabled" ) +#define ENOTCONN_STATUS( status ) \ + EUNIQ ( EINFO_ENOTCONN, ( (status) & 0x1f ), \ + ENOTCONN_DISABLED ) + +static int ipair_create ( struct interface *xfer, unsigned int flags ); + +/** Bulk IN completion profiler */ +static struct profiler iphone_in_profiler __profiler = + { .name = "iphone.in" }; + +/** Bulk OUT profiler */ +static struct profiler iphone_out_profiler __profiler = + { .name = "iphone.out" }; + +/** List of USB multiplexers */ +static LIST_HEAD ( imuxes ); + +/** List of iPhone network devices */ +static LIST_HEAD ( iphones ); + +/****************************************************************************** + * + * iPhone pairing certificates + * + ****************************************************************************** + */ + +/** iPhone root certificate fingerprint */ +static uint8_t icert_root_fingerprint[SHA256_DIGEST_SIZE]; + +/** Root of trust for iPhone certificates */ +static struct x509_root icert_root = { + .refcnt = REF_INIT ( ref_no_free ), + .digest = &sha256_algorithm, + .count = 1, + .fingerprints = icert_root_fingerprint, +}; + +/** Single zero byte used in constructed certificates */ +static const uint8_t icert_nul[] = { 0x00 }; + +/** "RSA algorithm" identifier used in constructed certificates */ +static const uint8_t icert_rsa[] = { + /* algorithm */ + ASN1_SHORT ( ASN1_SEQUENCE, + ASN1_SHORT ( ASN1_OID, ASN1_OID_RSAENCRYPTION ), + ASN1_NULL, 0x00 ) +}; + +/** "SHA-256 with RSA algorithm" identifier used in constructed certificates */ +static const uint8_t icert_sha256_rsa[] = { + ASN1_SHORT ( ASN1_SEQUENCE, + ASN1_SHORT ( ASN1_OID, ASN1_OID_SHA256WITHRSAENCRYPTION ), + ASN1_NULL, 0x00 ), +}; + +/** Extensions used in constructed root certificate */ +static const uint8_t icert_root_exts_data[] = { + /* extensions */ + ASN1_SHORT ( ASN1_EXPLICIT_TAG ( 3 ), ASN1_SHORT ( ASN1_SEQUENCE, + /* basicConstraints */ + ASN1_SHORT ( ASN1_SEQUENCE, + /* extnID */ + ASN1_SHORT ( ASN1_OID, ASN1_OID_BASICCONSTRAINTS ), + /* critical */ + ASN1_SHORT ( ASN1_BOOLEAN, 0xff ), + /* extnValue */ + ASN1_SHORT ( ASN1_OCTET_STRING, + ASN1_SHORT ( ASN1_SEQUENCE, + ASN1_SHORT ( ASN1_BOOLEAN, + 0xff ) ) ) ) ) ) +}; + +/** Extensions used in constructed root certificate */ +static struct asn1_cursor icert_root_exts = + ASN1_CURSOR ( icert_root_exts_data ); + +/** Extensions used in constructed leaf certificates */ +static const uint8_t icert_leaf_exts_data[] = { + /* extensions */ + ASN1_SHORT ( ASN1_EXPLICIT_TAG ( 3 ), ASN1_SHORT ( ASN1_SEQUENCE, + /* basicConstraints */ + ASN1_SHORT ( ASN1_SEQUENCE, + /* extnID */ + ASN1_SHORT ( ASN1_OID, ASN1_OID_BASICCONSTRAINTS ), + /* critical */ + ASN1_SHORT ( ASN1_BOOLEAN, 0xff 
), + /* extnValue */ + ASN1_SHORT ( ASN1_OCTET_STRING, + ASN1_SHORT ( ASN1_SEQUENCE, + ASN1_SHORT ( ASN1_BOOLEAN, + 0x00 ) ) ) ), + /* keyUsage */ + ASN1_SHORT ( ASN1_SEQUENCE, + /* extnID */ + ASN1_SHORT ( ASN1_OID, ASN1_OID_KEYUSAGE ), + /* critical */ + ASN1_SHORT ( ASN1_BOOLEAN, 0xff ), + /* extnValue */ + ASN1_SHORT ( ASN1_OCTET_STRING, + ASN1_SHORT ( ASN1_BIT_STRING, 0x07, + ( X509_DIGITAL_SIGNATURE | + X509_KEY_ENCIPHERMENT ), + 0x00 ) ) ) ) ) +}; + +/** Extensions used in constructed leaf certificates */ +static struct asn1_cursor icert_leaf_exts = + ASN1_CURSOR ( icert_leaf_exts_data ); + +/** "TBSCertificate" prefix in constructed certificates */ +static const uint8_t icert_tbs_prefix[] = { + /* version */ + ASN1_SHORT ( ASN1_EXPLICIT_TAG ( 0 ), ASN1_SHORT ( ASN1_INTEGER, 2 ) ), + /* serialNumber */ + ASN1_SHORT ( ASN1_INTEGER, 0 ), + /* signature */ + ASN1_SHORT ( ASN1_SEQUENCE, + ASN1_SHORT ( ASN1_OID, ASN1_OID_SHA256WITHRSAENCRYPTION ), + ASN1_NULL, 0x00 ) +}; + +/** Validity period in constructed certificates */ +static const uint8_t icert_validity[] = { + /* validity */ + ASN1_SHORT ( ASN1_SEQUENCE, + /* notBefore */ + ASN1_SHORT ( ASN1_GENERALIZED_TIME, + '1', '9', '7', '8', '1', '2', '1', '0', + '2', '2', '0', '0', '0', '0', 'Z' ), + /* notAfter */ + ASN1_SHORT ( ASN1_GENERALIZED_TIME, + '2', '9', '9', '9', '0', '1', '0', '1', + '0', '0', '0', '0', '0', '0', 'Z' ) ) +}; + +/** "Root" subject name */ +static const uint8_t icert_name_root_data[] = { + ASN1_SHORT ( ASN1_SEQUENCE, ASN1_SHORT ( ASN1_SET, + ASN1_SHORT ( ASN1_SEQUENCE, + ASN1_SHORT ( ASN1_OID, ASN1_OID_COMMON_NAME ), + ASN1_SHORT ( ASN1_UTF8_STRING, 'R', 'o', 'o', 't' ) ) ) ) +}; + +/** "Root" subject name */ +static struct asn1_cursor icert_name_root = + ASN1_CURSOR ( icert_name_root_data ); + +/** "iPXE" subject name */ +static const uint8_t icert_name_ipxe_data[] = { + ASN1_SHORT ( ASN1_SEQUENCE, ASN1_SHORT ( ASN1_SET, + ASN1_SHORT ( ASN1_SEQUENCE, + ASN1_SHORT ( ASN1_OID, ASN1_OID_COMMON_NAME ), + ASN1_SHORT ( ASN1_UTF8_STRING, 'i', 'P', 'X', 'E' ) ) ) ) +}; + +/** "iPXE" subject name */ +static struct asn1_cursor icert_name_ipxe = + ASN1_CURSOR ( icert_name_ipxe_data ); + +/** "iPhone" subject name */ +static const uint8_t icert_name_iphone_data[] = { + ASN1_SHORT ( ASN1_SEQUENCE, ASN1_SHORT ( ASN1_SET, + ASN1_SHORT ( ASN1_SEQUENCE, + ASN1_SHORT ( ASN1_OID, ASN1_OID_COMMON_NAME ), + ASN1_SHORT ( ASN1_UTF8_STRING, + 'i', 'P', 'h', 'o', 'n', 'e' ) ) ) ) +}; + +/** "iPhone" subject name */ +static struct asn1_cursor icert_name_iphone = + ASN1_CURSOR ( icert_name_iphone_data ); + +/** Public key(s) used for pairing */ +static const uint8_t icert_public_a[] __unused = { + 0x02, 0x81, 0x81, 0x00, 0xc9, 0xc0, 0xdd, 0xa6, 0xd5, 0xf9, 0x05, 0x3e, + 0x1d, 0xcb, 0x67, 0x08, 0xa8, 0x50, 0x27, 0x63, 0x95, 0x87, 0x42, 0x7e, + 0xfb, 0xff, 0x55, 0x55, 0xb8, 0xc0, 0x6f, 0x13, 0xcb, 0xf7, 0xc5, 0x1b, + 0xda, 0x44, 0x3c, 0xbc, 0x1a, 0xe1, 0x15, 0x1e, 0xab, 0x56, 0x74, 0x02, + 0x8b, 0xb3, 0xcd, 0x42, 0x56, 0xcd, 0x9c, 0xc3, 0x15, 0xe2, 0x33, 0x97, + 0x6d, 0x77, 0xdd, 0x20, 0x3a, 0x74, 0xb1, 0x4c, 0xee, 0xeb, 0xe8, 0xaa, + 0x20, 0x71, 0x5a, 0xa2, 0x5b, 0xf8, 0x1a, 0xcb, 0xd2, 0x7b, 0x96, 0xb6, + 0x42, 0xb4, 0x7c, 0x7a, 0x13, 0xec, 0x55, 0xd3, 0x36, 0x8b, 0xe3, 0x17, + 0xc5, 0xc4, 0xcc, 0xe0, 0x27, 0x8c, 0xed, 0xa1, 0x4c, 0x8a, 0x50, 0x4a, + 0x1c, 0xc4, 0x58, 0xf6, 0xcd, 0xcc, 0xc3, 0x5f, 0xe6, 0x3c, 0xff, 0x97, + 0x51, 0xed, 0xf5, 0xaa, 0x89, 0xcc, 0x3f, 0x63, 0x67, 0x46, 0x9f, 0xbf, + 0x02, 0x03, 0x01, 0x00, 0x01 +}; +static 
const uint8_t icert_public_b[] __unused = { + 0x02, 0x81, 0x81, 0x00, 0xcd, 0x96, 0x81, 0x78, 0xbb, 0x2e, 0x64, 0xda, + 0xd3, 0x7e, 0xd7, 0x3a, 0xac, 0x3f, 0x00, 0xe5, 0x41, 0x65, 0x56, 0xac, + 0x2d, 0x77, 0xc0, 0x1a, 0xad, 0x32, 0xca, 0x0c, 0x72, 0xae, 0xdb, 0x57, + 0xc1, 0xc7, 0x79, 0xef, 0xc6, 0x71, 0x9f, 0xad, 0x82, 0x14, 0x94, 0x4b, + 0xf9, 0xd8, 0x78, 0xf1, 0xca, 0x99, 0xf5, 0x71, 0x07, 0x88, 0xd7, 0x55, + 0xc7, 0xcb, 0x36, 0x5d, 0xdb, 0x84, 0x46, 0xac, 0x05, 0xea, 0xf1, 0xe1, + 0xbe, 0x91, 0x50, 0x85, 0x1e, 0x64, 0xab, 0x02, 0x82, 0xab, 0xba, 0x42, + 0x06, 0x5a, 0xe3, 0xc3, 0x25, 0xd0, 0x95, 0x04, 0x54, 0xb4, 0x44, 0x40, + 0x5a, 0x42, 0x06, 0x04, 0x7d, 0x3b, 0x9e, 0xaf, 0x2e, 0xe9, 0xc8, 0xad, + 0x46, 0x3a, 0xff, 0xe2, 0x39, 0xc8, 0x48, 0x0a, 0x49, 0xaa, 0xfe, 0x1f, + 0x6c, 0x91, 0x5d, 0x1d, 0xd6, 0xb0, 0x04, 0xd1, 0x6c, 0xb2, 0x43, 0xaf, + 0x02, 0x03, 0x01, 0x00, 0x01 +}; + +/** + * "Private" key(s) used for pairing + * + * Yes, this publicly visible "private" key completely obviates any + * nominal security provided by the pairing process. Looked at + * another way, this modifies the iPhone to behave like every other + * USB tethering device: if the cable is physically connected and + * tethering is enabled then the device will Just Work. + * + * Unlike Android, the iPhone seems to have no meaningful permissions + * model: any device that is trusted to use the phone for tethering + * seems to also be trusted to use the iPhone for any other purpose + * (e.g. accessing files, reading messages, etc). Apple should + * probably fix this at some point, e.g. via defining extended key + * usages in the root and host certificates. + */ +static const uint8_t icert_private_a[] __unused = { + 0x02, 0x81, 0x80, 0x1d, 0x60, 0xb7, 0x25, 0xdf, 0x0c, 0x76, 0xc5, 0xf7, + 0xc2, 0xb1, 0x8b, 0x22, 0x2f, 0x21, 0xbd, 0x2f, 0x7d, 0xd5, 0xa1, 0xf6, + 0x01, 0xd5, 0x24, 0x39, 0x55, 0xd4, 0x16, 0xd6, 0xe1, 0x8a, 0x53, 0x26, + 0xf2, 0x3e, 0xc1, 0xc9, 0x4c, 0x33, 0x2e, 0x17, 0x16, 0xec, 0xa7, 0x9e, + 0x3e, 0x1d, 0x4a, 0x66, 0xa7, 0x64, 0x07, 0x48, 0x3d, 0x7a, 0xf3, 0xb6, + 0xdd, 0xf8, 0x56, 0x04, 0x0d, 0x0f, 0xef, 0xf8, 0xbd, 0xbc, 0x73, 0xe2, + 0xc2, 0xae, 0x1b, 0x87, 0x90, 0x18, 0x2a, 0x68, 0xff, 0xae, 0x49, 0xdf, + 0x7c, 0xff, 0xe8, 0x44, 0xa8, 0x3e, 0x4e, 0x4f, 0xf5, 0xfa, 0x51, 0x96, + 0xb8, 0x08, 0xf3, 0x18, 0xd6, 0x52, 0xdf, 0x3a, 0x8a, 0xed, 0xda, 0xcd, + 0xb4, 0x06, 0x99, 0x41, 0xcb, 0x23, 0x17, 0xaf, 0xc3, 0x3e, 0xfe, 0xdf, + 0x97, 0xf3, 0xd6, 0x18, 0x7e, 0x03, 0xaf, 0x62, 0xb2, 0xc8, 0xc9 +}; +static const uint8_t icert_private_b[] __unused = { + 0x02, 0x81, 0x80, 0x45, 0xbd, 0xc0, 0xbe, 0x0c, 0x01, 0x79, 0x05, 0x22, + 0xa9, 0xec, 0xa9, 0x62, 0xb5, 0x1c, 0xc0, 0xa8, 0xa6, 0x8f, 0xf8, 0x68, + 0x94, 0x2e, 0xfe, 0xdd, 0xb2, 0x55, 0x08, 0x53, 0xff, 0x2d, 0x39, 0x5f, + 0xeb, 0x23, 0x5a, 0x4b, 0x9f, 0x4f, 0xe3, 0xb4, 0x34, 0xf6, 0xf9, 0xaf, + 0x0f, 0xd8, 0x37, 0x6d, 0xdb, 0x3c, 0x7f, 0xd3, 0x66, 0x80, 0x66, 0x01, + 0x18, 0xd6, 0xa0, 0x90, 0x4f, 0x17, 0x09, 0xb8, 0x68, 0x44, 0xf0, 0xde, + 0x16, 0x4a, 0x8a, 0x0d, 0xa7, 0x5f, 0xb5, 0x4c, 0x53, 0xcc, 0x21, 0xdd, + 0x4f, 0x05, 0x64, 0xa5, 0xc5, 0xac, 0x2c, 0xd8, 0x0a, 0x7b, 0xf5, 0xa4, + 0x63, 0x32, 0xb0, 0x2c, 0xf8, 0xef, 0x8c, 0xf8, 0x2c, 0xba, 0x1c, 0x2c, + 0xc7, 0x0a, 0xf3, 0xe9, 0x8f, 0xfb, 0x0a, 0x61, 0x1b, 0x3a, 0xdd, 0x9f, + 0x74, 0x7d, 0xb3, 0x42, 0x59, 0x52, 0x07, 0x59, 0x8e, 0xb7, 0x41 +}; + +/** Key pair selection + * + * This exists only to allow for testing of the process for handling a + * failed TLS negotiation. 
+ */ +#define icert_key_suffix a +#define icert_key_variable( prefix ) _C2 ( prefix, icert_key_suffix ) +#define icert_public icert_key_variable ( icert_public_ ) +#define icert_private icert_key_variable ( icert_private_ ) + +/** PEM certificate prefix */ +static const char icert_begin[] = "-----BEGIN CERTIFICATE-----\n"; + +/** PEM certificate suffix */ +static const char icert_end[] = "\n-----END CERTIFICATE-----\n"; + +/** + * Free pairing certificates + * + * @v icert Pairing certificates + */ +static void icert_free ( struct icert *icert ) { + + privkey_put ( icert->key ); + x509_put ( icert->root ); + x509_put ( icert->host ); + x509_put ( icert->device ); + memset ( icert, 0, sizeof ( *icert ) ); +} + +/** + * Construct certificate + * + * @v icert Pairing certificates + * @v subject Subject name + * @v issuer Issuer name + * @v private Private key + * @v public Public key + * @v exts Certificate extensions + * @v cert Certificate to fill in + * @ret rc Return status code + * + * On success, the caller is responsible for eventually calling + * x509_put() on the allocated encoded certificate. + */ +static int icert_cert ( struct icert *icert, struct asn1_cursor *subject, + struct asn1_cursor *issuer, struct asn1_cursor *private, + struct asn1_cursor *public, struct asn1_cursor *exts, + struct x509_certificate **cert ) { + struct digest_algorithm *digest = &sha256_algorithm; + struct pubkey_algorithm *pubkey = &rsa_algorithm; + struct asn1_builder spki = { NULL, 0 }; + struct asn1_builder tbs = { NULL, 0 }; + struct asn1_builder raw = { NULL, 0 }; + uint8_t digest_ctx[SHA256_CTX_SIZE]; + uint8_t digest_out[SHA256_DIGEST_SIZE]; + uint8_t pubkey_ctx[RSA_CTX_SIZE]; + int len; + int rc; + + /* Initialise "private" key */ + if ( ( rc = pubkey_init ( pubkey, pubkey_ctx, private->data, + private->len ) ) != 0 ) { + DBGC ( icert, "ICERT %p could not initialise private key: " + "%s\n", icert, strerror ( rc ) ); + goto err_pubkey_init; + } + + /* Construct subjectPublicKeyInfo */ + if ( ( rc = ( asn1_prepend_raw ( &spki, public->data, public->len ), + asn1_prepend_raw ( &spki, icert_nul, + sizeof ( icert_nul ) ), + asn1_wrap ( &spki, ASN1_BIT_STRING ), + asn1_prepend_raw ( &spki, icert_rsa, + sizeof ( icert_rsa ) ), + asn1_wrap ( &spki, ASN1_SEQUENCE ) ) ) != 0 ) { + DBGC ( icert, "ICERT %p could not build subjectPublicKeyInfo: " + "%s\n", icert, strerror ( rc ) ); + goto err_spki; + } + + /* Construct tbsCertificate */ + if ( ( rc = ( asn1_prepend_raw ( &tbs, exts->data, exts->len ), + asn1_prepend_raw ( &tbs, spki.data, spki.len ), + asn1_prepend_raw ( &tbs, subject->data, subject->len ), + asn1_prepend_raw ( &tbs, icert_validity, + sizeof ( icert_validity ) ), + asn1_prepend_raw ( &tbs, issuer->data, issuer->len ), + asn1_prepend_raw ( &tbs, icert_tbs_prefix, + sizeof ( icert_tbs_prefix ) ), + asn1_wrap ( &tbs, ASN1_SEQUENCE ) ) ) != 0 ) { + DBGC ( icert, "ICERT %p could not build tbsCertificate: %s\n", + icert, strerror ( rc ) ); + goto err_tbs; + } + + /* Calculate certificate digest */ + digest_init ( digest, digest_ctx ); + digest_update ( digest, digest_ctx, tbs.data, tbs.len ); + digest_final ( digest, digest_ctx, digest_out ); + + /* Construct signature */ + if ( ( rc = asn1_grow ( &raw, pubkey_max_len ( pubkey, + pubkey_ctx ) ) ) != 0 ) { + DBGC ( icert, "ICERT %p could not build signature: %s\n", + icert, strerror ( rc ) ); + goto err_grow; + } + if ( ( len = pubkey_sign ( pubkey, pubkey_ctx, digest, digest_out, + raw.data ) ) < 0 ) { + rc = len; + DBGC ( icert, "ICERT %p could not 
sign: %s\n", + icert, strerror ( rc ) ); + goto err_pubkey_sign; + } + assert ( ( ( size_t ) len ) == raw.len ); + + /* Construct raw certificate data */ + if ( ( rc = ( asn1_prepend_raw ( &raw, icert_nul, + sizeof ( icert_nul ) ), + asn1_wrap ( &raw, ASN1_BIT_STRING ), + asn1_prepend_raw ( &raw, icert_sha256_rsa, + sizeof ( icert_sha256_rsa ) ), + asn1_prepend_raw ( &raw, tbs.data, tbs.len ), + asn1_wrap ( &raw, ASN1_SEQUENCE ) ) ) != 0 ) { + DBGC ( icert, "ICERT %p could not build certificate: %s\n", + icert, strerror ( rc ) ); + goto err_raw; + } + + /* Parse certificate */ + if ( ( rc = x509_certificate ( raw.data, raw.len, cert ) ) != 0 ) { + DBGC ( icert, "ICERT %p invalid certificate: %s\n", + icert, strerror ( rc ) ); + DBGC_HDA ( icert, 0, raw.data, raw.len ); + goto err_x509; + } + + err_x509: + err_raw: + err_pubkey_sign: + free ( raw.data ); + err_grow: + free ( tbs.data ); + err_tbs: + free ( spki.data ); + err_spki: + pubkey_final ( pubkey, pubkey_ctx ); + err_pubkey_init: + return rc; +} + +/** + * Construct certificates + * + * @v icert Certificate set + * @v pubkey Device public key + * @ret rc Return status code + */ +static int icert_certs ( struct icert *icert, struct asn1_cursor *key ) { + struct digest_algorithm *digest = icert_root.digest; + struct asn1_builder public = { NULL, 0 }; + struct asn1_builder *private; + int rc; + + /* Free any existing key and certificates */ + icert_free ( icert ); + + /* Allocate "private" key */ + icert->key = zalloc ( sizeof ( *icert->key ) ); + if ( ! icert->key ) { + rc = -ENOMEM; + goto error; + } + privkey_init ( icert->key ); + private = &icert->key->builder; + + /* Construct our "private" key */ + if ( ( rc = ( asn1_prepend_raw ( private, icert_private, + sizeof ( icert_private ) ), + asn1_prepend_raw ( private, icert_public, + sizeof ( icert_public ) ), + asn1_prepend ( private, ASN1_INTEGER, icert_nul, + sizeof ( icert_nul ) ), + asn1_wrap ( private, ASN1_SEQUENCE ) ) ) != 0 ) { + DBGC ( icert, "ICERT %p could not build private key: %s\n", + icert, strerror ( rc ) ); + goto error; + } + + /* Construct our own public key */ + if ( ( rc = ( asn1_prepend_raw ( &public, icert_public, + sizeof ( icert_public ) ), + asn1_wrap ( &public, ASN1_SEQUENCE ) ) ) != 0 ) { + DBGC ( icert, "ICERT %p could not build public key: %s\n", + icert, strerror ( rc ) ); + goto error; + } + + /* Construct root certificate */ + if ( ( rc = icert_cert ( icert, &icert_name_root, &icert_name_root, + asn1_built ( private ), asn1_built ( &public ), + &icert_root_exts, &icert->root ) ) != 0 ) + goto error; + + /* Construct host certificate */ + if ( ( rc = icert_cert ( icert, &icert_name_ipxe, &icert_name_root, + asn1_built ( private ), asn1_built ( &public ), + &icert_leaf_exts, &icert->host ) ) != 0 ) + goto error; + + /* Construct device certificate */ + if ( ( rc = icert_cert ( icert, &icert_name_iphone, &icert_name_root, + asn1_built ( private ), key, + &icert_leaf_exts, &icert->device ) ) != 0 ) + goto error; + + /* Construct root of trust */ + assert ( digest->digestsize == sizeof ( icert_root_fingerprint ) ); + x509_fingerprint ( icert->root, digest, icert_root_fingerprint ); + + /* Free constructed keys */ + free ( public.data ); + return 0; + + error: + icert_free ( icert ); + free ( public.data ); + return rc; +} + +/** + * Construct doubly base64-encoded certificate + * + * @v icert Pairing certificates + * @v cert X.509 certificate + * @v encenc Doubly base64-encoded certificate to construct + * @ret rc Return status code + * + * On success, 
the caller is responsible for eventually calling free() + * on the allocated doubly encoded encoded certificate. + */ +static int icert_encode ( struct icert *icert, struct x509_certificate *cert, + char **encenc ) { + size_t encencoded_len; + size_t encoded_len; + size_t pem_len; + char *pem; + int rc; + + /* Sanity check */ + assert ( cert != NULL ); + + /* Create PEM */ + encoded_len = ( base64_encoded_len ( cert->raw.len ) + 1 /* NUL */ ); + pem_len = ( ( sizeof ( icert_begin ) - 1 /* NUL */ ) + + ( encoded_len - 1 /* NUL */ ) + + ( sizeof ( icert_end ) - 1 /* NUL */ ) + + 1 /* NUL */ ); + pem = malloc ( pem_len ); + if ( ! pem ) { + rc = -ENOMEM; + goto err_alloc_pem; + } + strcpy ( pem, icert_begin ); + base64_encode ( cert->raw.data, cert->raw.len, + ( pem + sizeof ( icert_begin ) - 1 /* NUL */ ), + encoded_len ); + strcpy ( ( pem + + ( sizeof ( icert_begin ) - 1 /* NUL */ ) + + ( encoded_len - 1 /* NUL */ ) ), icert_end ); + DBGC2 ( icert, "ICERT %p \"%s\" certificate:\n%s", + icert, x509_name ( cert ), pem ); + + /* Base64-encode the PEM (sic) */ + encencoded_len = ( base64_encoded_len ( pem_len - 1 /* NUL */ ) + + 1 /* NUL */ ); + *encenc = malloc ( encencoded_len ); + if ( ! *encenc ) { + rc = -ENOMEM; + goto err_alloc_encenc; + } + base64_encode ( pem, ( pem_len - 1 /* NUL */ ), *encenc, + encencoded_len ); + + /* Success */ + rc = 0; + + err_alloc_encenc: + free ( pem ); + err_alloc_pem: + return rc; +} + +/****************************************************************************** + * + * iPhone USB multiplexer + * + ****************************************************************************** + * + * The iPhone USB multiplexer speaks a protocol that is almost, but + * not quite, entirely unlike TCP. + * + */ + +/** + * Transmit message + * + * @v imux USB multiplexer + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int imux_tx ( struct imux *imux, struct io_buffer *iobuf ) { + struct imux_header *hdr = iobuf->data; + size_t len = iob_len ( iobuf ); + int rc; + + /* Populate header */ + assert ( len >= sizeof ( *hdr ) ); + hdr->len = htonl ( len ); + hdr->in_seq = htons ( imux->in_seq ); + hdr->out_seq = htons ( imux->out_seq ); + DBGCP ( imux, "IMUX %p transmitting:\n", imux ); + DBGCP_HDA ( imux, 0, hdr, len ); + + /* Transmit message */ + if ( ( rc = usb_stream ( &imux->usbnet.out, iobuf, 1 ) ) != 0 ) + goto err; + + /* Increment sequence number */ + imux->out_seq++; + + return 0; + + err: + free_iob ( iobuf ); + return rc; +} + +/** + * Transmit version message + * + * @v imux USB multiplexer + * @ret rc Return status code + */ +static int imux_tx_version ( struct imux *imux ) { + struct io_buffer *iobuf; + struct imux_header_version *vers; + int rc; + + /* Allocate I/O buffer */ + iobuf = alloc_iob ( sizeof ( *vers ) ); + if ( ! 
iobuf ) + return -ENOMEM; + vers = iob_put ( iobuf, sizeof ( *vers ) ); + + /* Construct version message */ + memset ( vers, 0, sizeof ( *vers ) ); + vers->hdr.protocol = htonl ( IMUX_VERSION ); + + /* Transmit message */ + if ( ( rc = imux_tx ( imux, iob_disown ( iobuf ) ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Transmit pseudo-TCP message + * + * @v imux USB multiplexer + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int imux_tx_tcp ( struct imux *imux, struct io_buffer *iobuf ) { + struct imux_header_tcp *tcp = iobuf->data; + size_t len = iob_len ( iobuf ); + int rc; + + /* Populate TCP header */ + assert ( len >= sizeof ( *tcp ) ); + tcp->hdr.protocol = htonl ( IMUX_TCP ); + tcp->tcp.src = htons ( imux->port ); + tcp->tcp.dest = htons ( IMUX_PORT_LOCKDOWND ); + tcp->tcp.seq = htonl ( imux->tcp_seq ); + tcp->tcp.ack = htonl ( imux->tcp_ack ); + tcp->tcp.hlen = ( ( sizeof ( tcp->tcp ) / 4 ) << 4 ); + tcp->tcp.win = htons ( IMUX_WINDOW ); + + /* Transmit message */ + if ( ( rc = imux_tx ( imux, iob_disown ( iobuf ) ) ) != 0 ) + return rc; + + /* Update TCP sequence */ + imux->tcp_seq += ( len - sizeof ( *tcp ) ); + + return 0; +} + +/** + * Transmit pseudo-TCP SYN + * + * @v imux USB multiplexer + * @ret rc Return status code + */ +static int imux_tx_syn ( struct imux *imux ) { + struct io_buffer *iobuf; + struct imux_header_tcp *syn; + int rc; + + /* Allocate I/O buffer */ + iobuf = alloc_iob ( sizeof ( *syn ) ); + if ( ! iobuf ) + return -ENOMEM; + syn = iob_put ( iobuf, sizeof ( *syn ) ); + + /* Construct TCP SYN message */ + memset ( syn, 0, sizeof ( *syn ) ); + syn->tcp.flags = TCP_SYN; + + /* Transmit message */ + if ( ( rc = imux_tx_tcp ( imux, iob_disown ( iobuf ) ) ) != 0 ) + return rc; + + /* Increment TCP sequence to compensate for SYN */ + imux->tcp_seq++; + + return 0; +} + +/** + * Open pairing client + * + * @v imux USB multiplexer + * @ret rc Return status code + */ +static int imux_start_pair ( struct imux *imux ) { + int rc; + + /* Disconnect any existing pairing client */ + intf_restart ( &imux->tcp, -EPIPE ); + + /* Create pairing client */ + if ( ( rc = ipair_create ( &imux->tcp, imux->flags ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Receive version message + * + * @v imux USB multiplexer + */ +static void imux_rx_version ( struct imux *imux ) { + + /* Reset output sequence */ + imux->out_seq = 0; + + /* Send TCP SYN */ + imux->action = imux_tx_syn; +} + +/** + * Receive log message + * + * @v imux USB multiplexer + * @v hdr Message header + * @v len Length of message + */ +static void imux_rx_log ( struct imux *imux, struct imux_header *hdr, + size_t len ) { + struct imux_header_log *log = + container_of ( hdr, struct imux_header_log, hdr ); + unsigned int level; + size_t msg_len; + char *tmp; + + /* Sanity check */ + if ( len < sizeof ( *log ) ) { + DBGC ( imux, "IMUX %p malformed log message:\n", imux ); + DBGC_HDA ( imux, 0, log, len ); + return; + } + + /* First byte is the log level, followed by a printable + * message with no NUL terminator. Extract the log level, + * then shuffle the message down within the buffer and append + * a NUL terminator. 
+ */ + msg_len = ( len - sizeof ( *hdr ) ); + level = log->level; + tmp = ( ( void * ) &log->level ); + memmove ( tmp, &log->msg, msg_len ); + tmp[msg_len] = '\0'; + + /* Print log message */ + DBGC ( imux, "IMUX %p <%d>: %s\n", imux, level, tmp ); +} + +/** + * Receive pseudo-TCP SYN+ACK + * + * @v imux USB multiplexer + */ +static void imux_rx_syn ( struct imux *imux ) { + + /* Increment TCP acknowledgement to compensate for SYN */ + imux->tcp_ack++; + + /* Start pairing client */ + imux->action = imux_start_pair; +} + +/** + * Receive pseudo-TCP message + * + * @v imux USB multiplexer + * @v iobuf I/O buffer + */ +static void imux_rx_tcp ( struct imux *imux, struct io_buffer *iobuf ) { + struct imux_header_tcp *tcp = iobuf->data; + size_t len = iob_len ( iobuf ); + int rc; + + /* Sanity check */ + if ( len < sizeof ( *tcp ) ) { + DBGC ( imux, "IMUX %p malformed TCP message:\n", imux ); + DBGC_HDA ( imux, 0, tcp, len ); + goto error; + } + + /* Ignore unexpected packets */ + if ( tcp->tcp.dest != htons ( imux->port ) ) { + DBGC ( imux, "IMUX %p ignoring unexpected TCP port %d:\n", + imux, ntohs ( tcp->tcp.dest ) ); + DBGC_HDA ( imux, 0, tcp, len ); + goto error; + } + + /* Ignore resets */ + if ( tcp->tcp.flags & TCP_RST ) { + DBGC ( imux, "IMUX %p ignoring TCP RST\n", imux ); + DBGC2_HDA ( imux, 0, tcp, len ); + goto error; + } + + /* Record ACK number */ + imux->tcp_ack = ( ntohl ( tcp->tcp.seq ) + len - sizeof ( *tcp ) ); + + /* Handle received message */ + if ( tcp->tcp.flags & TCP_SYN ) { + + /* Received SYN+ACK */ + imux_rx_syn ( imux ); + + } else { + + /* Strip header */ + iob_pull ( iobuf, sizeof ( *tcp ) ); + + /* Deliver via socket */ + if ( ( rc = xfer_deliver_iob ( &imux->tcp, + iob_disown ( iobuf ) ) ) != 0 ) + goto error; + } + + error: + free_iob ( iobuf ); +} + +/** + * Complete bulk IN transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void imux_in_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct imux *imux = container_of ( ep, struct imux, usbnet.in ); + struct imux_header *hdr = iobuf->data; + size_t len = iob_len ( iobuf ); + + /* Ignore packets cancelled when the endpoint closes */ + if ( ! 
ep->open ) + goto drop; + + /* Report USB errors */ + if ( rc != 0 ) { + DBGC ( imux, "IMUX %p bulk IN failed: %s\n", + imux, strerror ( rc ) ); + goto drop; + } + + /* Sanity check */ + if ( len < sizeof ( *hdr ) ) { + DBGC ( imux, "IMUX %p malformed message:\n", imux ); + DBGC_HDA ( imux, 0, hdr, len ); + goto drop; + } + + /* Record input sequence */ + imux->in_seq = ntohs ( hdr->in_seq ); + + /* Handle according to protocol */ + DBGCP ( imux, "IMUX %p received:\n", imux ); + DBGCP_HDA ( imux, 0, hdr, len ); + switch ( hdr->protocol ) { + case htonl ( IMUX_VERSION ): + imux_rx_version ( imux ); + break; + case htonl ( IMUX_LOG ): + imux_rx_log ( imux, hdr, len ); + break; + case htonl ( IMUX_TCP ): + imux_rx_tcp ( imux, iob_disown ( iobuf ) ); + break; + default: + DBGC ( imux, "IMUX %p unknown message type %d:\n", + imux, ntohl ( hdr->protocol ) ); + DBGC_HDA ( imux, 0, hdr, len ); + break; + } + + drop: + free_iob ( iobuf ); +} + +/** Bulk IN endpoint operations */ +static struct usb_endpoint_driver_operations imux_in_operations = { + .complete = imux_in_complete, +}; + +/** + * Complete bulk OUT transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void imux_out_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct imux *imux = container_of ( ep, struct imux, usbnet.out ); + + /* Report USB errors */ + if ( rc != 0 ) { + DBGC ( imux, "IMUX %p bulk OUT failed: %s\n", + imux, strerror ( rc ) ); + goto error; + } + + error: + free_iob ( iobuf ); +} + +/** Bulk OUT endpoint operations */ +static struct usb_endpoint_driver_operations imux_out_operations = { + .complete = imux_out_complete, +}; + +/** + * Shut down USB multiplexer + * + * @v imux USB multiplexer + */ +static void imux_shutdown ( struct imux *imux ) { + + /* Shut down interfaces */ + intf_shutdown ( &imux->tcp, -ECANCELED ); + + /* Close USB network device, if open */ + if ( process_running ( &imux->process ) ) { + process_del ( &imux->process ); + usbnet_close ( &imux->usbnet ); + } +} + +/** + * Close USB multiplexer + * + * @v imux USB multiplexer + * @v rc Reason for close + */ +static void imux_close ( struct imux *imux, int rc ) { + struct iphone *iphone; + + /* Restart interfaces */ + intf_restart ( &imux->tcp, rc ); + + /* Record pairing status */ + imux->rc = rc; + + /* Trigger link check on any associated iPhones */ + list_for_each_entry ( iphone, &iphones, list ) { + if ( iphone->usb == imux->usb ) + start_timer_nodelay ( &iphone->timer ); + } + + /* Retry pairing on any error */ + if ( rc != 0 ) { + + /* Increment port number */ + imux->port++; + + /* Request pairing on any retry attempt */ + imux->flags = IPAIR_REQUEST; + + /* Send new pseudo-TCP SYN */ + imux->action = imux_tx_syn; + + DBGC ( imux, "IMUX %p retrying pairing: %s\n", + imux, strerror ( rc ) ); + return; + } + + /* Shut down multiplexer on pairing success */ + imux_shutdown ( imux ); +} + +/** + * Allocate I/O buffer for pseudo-TCP socket + * + * @v imux USB multiplexer + * @v len I/O buffer payload length + * @ret iobuf I/O buffer + */ +static struct io_buffer * imux_alloc_iob ( struct imux *imux __unused, + size_t len ) { + struct imux_header_tcp *tcp; + struct io_buffer *iobuf; + + /* Allocate I/O buffer */ + iobuf = alloc_iob ( sizeof ( *tcp ) + len ); + if ( ! 
iobuf ) + return NULL; + + /* Reserve space for pseudo-TCP message header */ + iob_reserve ( iobuf, sizeof ( *tcp ) ); + + return iobuf; +} + +/** + * Transmit packet via pseudo-TCP socket + * + * @v imux USB multiplexer + * @v iobuf I/O buffer + * @v meta Data transfer metadata + * @ret rc Return status code + */ +static int imux_deliver ( struct imux *imux, struct io_buffer *iobuf, + struct xfer_metadata *meta __unused ) { + struct imux_header_tcp *tcp; + + /* Prepend pseudo-TCP header */ + tcp = iob_push ( iobuf, sizeof ( *tcp ) ); + memset ( tcp, 0, sizeof ( *tcp ) ); + tcp->tcp.flags = TCP_ACK; + + /* Transmit pseudo-TCP packet */ + return imux_tx_tcp ( imux, iob_disown ( iobuf ) ); +} + +/** Pseudo-TCP socket interface operations */ +static struct interface_operation imux_tcp_operations[] = { + INTF_OP ( xfer_deliver, struct imux *, imux_deliver ), + INTF_OP ( xfer_alloc_iob, struct imux *, imux_alloc_iob ), + INTF_OP ( intf_close, struct imux *, imux_close ), +}; + +/** Pseudo-TCP socket interface descriptor */ +static struct interface_descriptor imux_tcp_desc = + INTF_DESC ( struct imux, tcp, imux_tcp_operations ); + +/** + * Multiplexer process + * + * @v imux USB multiplexer + */ +static void imux_step ( struct imux *imux ) { + int rc; + + /* Poll USB bus */ + usb_poll ( imux->bus ); + + /* Do nothing more if multiplexer has been closed */ + if ( ! process_running ( &imux->process ) ) + return; + + /* Refill endpoints */ + if ( ( rc = usbnet_refill ( &imux->usbnet ) ) != 0 ) { + /* Wait for next poll */ + return; + } + + /* Perform pending action, if any */ + if ( imux->action ) { + if ( ( rc = imux->action ( imux ) ) != 0 ) + imux_close ( imux, rc ); + imux->action = NULL; + } +} + +/** Multiplexer process descriptor */ +static struct process_descriptor imux_process_desc = + PROC_DESC ( struct imux, process, imux_step ); + +/** + * Probe device + * + * @v func USB function + * @v config Configuration descriptor + * @ret rc Return status code + */ +static int imux_probe ( struct usb_function *func, + struct usb_configuration_descriptor *config ) { + struct usb_device *usb = func->usb; + struct imux *imux; + int rc; + + /* Allocate and initialise structure */ + imux = zalloc ( sizeof ( *imux ) ); + if ( ! 
imux ) { + rc = -ENOMEM; + goto err_alloc; + } + ref_init ( &imux->refcnt, NULL ); + imux->usb = usb; + imux->bus = usb->port->hub->bus; + usbnet_init ( &imux->usbnet, func, NULL, &imux_in_operations, + &imux_out_operations ); + usb_refill_init ( &imux->usbnet.in, 0, IMUX_IN_MTU, IMUX_IN_MAX_FILL ); + process_init ( &imux->process, &imux_process_desc, &imux->refcnt ); + imux->action = imux_tx_version; + imux->port = IMUX_PORT_LOCAL; + intf_init ( &imux->tcp, &imux_tcp_desc, &imux->refcnt ); + imux->rc = -EINPROGRESS_PAIRING; + + /* Describe USB network device */ + if ( ( rc = usbnet_describe ( &imux->usbnet, config ) ) != 0 ) { + DBGC ( imux, "IMUX %p could not describe: %s\n", + imux, strerror ( rc ) ); + goto err_describe; + } + + /* Open USB network device */ + if ( ( rc = usbnet_open ( &imux->usbnet ) ) != 0 ) { + DBGC ( imux, "IMUX %p could not open: %s\n", + imux, strerror ( rc ) ); + goto err_open; + } + + /* Start polling process */ + process_add ( &imux->process ); + + /* Add to list of multiplexers */ + list_add ( &imux->list, &imuxes ); + + usb_func_set_drvdata ( func, imux ); + return 0; + + list_del ( &imux->list ); + imux_shutdown ( imux ); + err_open: + err_describe: + ref_put ( &imux->refcnt ); + err_alloc: + return rc; +} + +/** + * Remove device + * + * @v func USB function + */ +static void imux_remove ( struct usb_function *func ) { + struct imux *imux = usb_func_get_drvdata ( func ); + + list_del ( &imux->list ); + imux_shutdown ( imux ); + ref_put ( &imux->refcnt ); +} + +/** USB multiplexer device IDs */ +static struct usb_device_id imux_ids[] = { + { + .name = "imux", + .vendor = 0x05ac, + .product = USB_ANY_ID, + }, +}; + +/** USB multiplexer driver */ +struct usb_driver imux_driver __usb_driver = { + .ids = imux_ids, + .id_count = ( sizeof ( imux_ids ) / sizeof ( imux_ids[0] ) ), + .class = USB_CLASS_ID ( 0xff, 0xfe, 0x02 ), + .score = USB_SCORE_NORMAL, + .probe = imux_probe, + .remove = imux_remove, +}; + +/****************************************************************************** + * + * iPhone pairing client + * + ****************************************************************************** + */ + +/** Common prefix for all pairing messages */ +static const char ipair_prefix[] = + "\n" + "\n" + "\n" + "\n" + "Label\n" + "iPXE\n" + "Request\n"; + +/** Common suffix for all pairing messages */ +static const char ipair_suffix[] = + "\n" + "\n"; + +/** Arbitrary system BUID used for pairing */ +static const char ipair_system_buid[] = "E4DB92D2-248A-469A-AC34-92045D07E695"; + +/** Arbitrary host ID used for pairing */ +static const char ipair_host_id[] = "93CEBC27-8457-4804-9108-F42549DF6143"; + +static int ipair_tx_pubkey ( struct ipair *ipair ); +static int ipair_rx_pubkey ( struct ipair *ipair, char *msg ); +static int ipair_tx_pair ( struct ipair *ipair ); +static int ipair_rx_pair ( struct ipair *ipair, char *msg ); +static int ipair_tx_session ( struct ipair *ipair ); +static int ipair_rx_session ( struct ipair *ipair, char *msg ); + +/** + * Free pairing client + * + * @v refcnt Reference counter + */ +static void ipair_free ( struct refcnt *refcnt ) { + struct ipair *ipair = container_of ( refcnt, struct ipair, refcnt ); + + icert_free ( &ipair->icert ); + free ( ipair ); +} + +/** + * Shut down pairing client + * + * @v ipair Pairing client + * @v rc Reason for close + */ +static void ipair_close ( struct ipair *ipair, int rc ) { + + /* Shut down interfaces */ + intf_shutdown ( &ipair->xfer, rc ); + + /* Stop timer */ + stop_timer ( &ipair->timer ); +} 
+ +/** + * Transmit XML message + * + * @v ipair Pairing client + * @v fmt Format string + * @v ... Arguments + * @ret rc Return status code + */ +static int __attribute__ (( format ( printf, 2, 3 ) )) +ipair_tx ( struct ipair *ipair, const char *fmt, ... ) { + struct io_buffer *iobuf; + struct ipair_header *hdr; + va_list args; + size_t len; + char *msg; + int rc; + + /* Calculate length of formatted string */ + va_start ( args, fmt ); + len = ( vsnprintf ( NULL, 0, fmt, args ) + 1 /* NUL */ ); + va_end ( args ); + + /* Allocate I/O buffer */ + iobuf = xfer_alloc_iob ( &ipair->xfer, ( sizeof ( *hdr ) + len ) ); + if ( ! iobuf ) + return -ENOMEM; + hdr = iob_put ( iobuf, sizeof ( *hdr ) ); + + /* Construct XML message */ + memset ( hdr, 0, sizeof ( *hdr ) ); + hdr->len = htonl ( len ); + msg = iob_put ( iobuf, len ); + vsnprintf ( msg, len, fmt, args ); + DBGC2 ( ipair, "IPAIR %p transmitting:\n%s\n", ipair, msg ); + + /* Transmit message */ + if ( ( rc = xfer_deliver_iob ( &ipair->xfer, + iob_disown ( iobuf ) ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Receive XML message payload + * + * @v ipair Pairing client + * @v msg Message payload + * @v len Length of message + * @ret rc Return status code + */ +static int ipair_rx ( struct ipair *ipair, char *msg, size_t len ) { + int ( * rx ) ( struct ipair *ipair, char *msg ); + int rc; + + /* Ignore empty messages */ + if ( ! len ) + return 0; + + /* Sanity check */ + if ( ( msg[ len - 1 ] != '\0' ) && ( msg[ len - 1 ] != '\n' ) ) { + DBGC ( ipair, "IPAIR %p malformed XML:\n", ipair ); + DBGC_HDA ( ipair, 0, msg, len ); + return -EPROTO; + } + + /* Add NUL terminator (potentially overwriting final newline) */ + msg[ len - 1 ] = '\0'; + DBGC2 ( ipair, "IPAIR %p received:\n%s\n\n", ipair, msg ); + + /* Handle according to current state */ + rx = ipair->rx; + if ( ! rx ) { + DBGC ( ipair, "IPAIR %p unexpected XML:\n%s\n", ipair, msg ); + return -EPROTO; + } + ipair->rx = NULL; + if ( ( rc = rx ( ipair, msg ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Locate XML tag + * + * @v ipair Pairing client + * @v msg XML message + * @v tag Tag name + * @ret start Start of tag content + * @ret end End of tag content + * @ret rc Return status code + */ +static int ipair_tag ( struct ipair *ipair, const char *msg, const char *tag, + char **start, char **end ) { + char buf[ 2 /* "" */ + 1 /* NUL */ ]; + + /* Locate opening tag */ + sprintf ( buf, "<%s>", tag ); + *start = strstr ( msg, buf ); + if ( ! *start ) + return -ENOENT; + *start += strlen ( buf ); + + /* Locate closing tag */ + sprintf ( buf, "", tag ); + *end = strstr ( *start, buf ); + if ( ! 
*end ) { + DBGC ( ipair, "IPAIR %p missing closing tag %s in:\n%s\n", + ipair, buf, msg ); + return -ENOENT; + } + + return 0; +} + +/** + * Locate XML property list dictionary value + * + * @v ipair Pairing client + * @v msg XML message + * @v key Key name + * @v type Key type + * @ret start Start of value content + * @ret end End of value content + * @ret rc Return status code + */ +static int ipair_key ( struct ipair *ipair, const char *msg, const char *key, + const char *type, char **start, char **end ) { + int rc; + + /* Iterate over keys */ + while ( 1 ) { + + /* Locate key */ + if ( ( rc = ipair_tag ( ipair, msg, "key", start, + end ) ) != 0 ) + return rc; + msg = *end; + + /* Check key name */ + if ( memcmp ( *start, key, ( *end - *start ) ) != 0 ) + continue; + + /* Locate value */ + return ipair_tag ( ipair, msg, type, start, end ); + } +} + +/** + * Transmit DevicePublicKey message + * + * @v ipair Pairing client + * @ret rc Return status code + */ +static int ipair_tx_pubkey ( struct ipair *ipair ) { + int rc; + + /* Transmit message */ + if ( ( rc = ipair_tx ( ipair, + "%s" + "GetValue\n" + "Key\n" + "DevicePublicKey\n" + "%s", + ipair_prefix, ipair_suffix ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Receive DevicePublicKey message + * + * @v ipair Pairing client + * @v msg XML message + * @ret rc Return status code + */ +static int ipair_rx_pubkey ( struct ipair *ipair, char *msg ) { + struct asn1_cursor *key; + char *data; + char *end; + char *decoded; + size_t max_len; + int len; + int next; + int rc; + + /* Locate "Value" value */ + if ( ( rc = ipair_key ( ipair, msg, "Value", "data", &data, + &end ) ) != 0 ) { + DBGC ( ipair, "IPAIR %p unexpected public key message:\n%s\n", + ipair, msg ); + goto err_tag; + } + *end = '\0'; + + /* Decode outer layer of Base64 */ + max_len = base64_decoded_max_len ( data ); + decoded = malloc ( max_len ); + if ( ! 
decoded ) { + rc = -ENOMEM; + goto err_alloc; + } + len = base64_decode ( data, decoded, max_len ); + if ( len < 0 ) { + rc = len; + DBGC ( ipair, "IPAIR %p invalid outer public key:\n%s\n", + ipair, data ); + goto err_decode; + } + + /* Decode inner layer of Base64 */ + next = pem_asn1 ( virt_to_user ( decoded ), len, 0, &key ); + if ( next < 0 ) { + rc = next; + DBGC ( ipair, "IPAIR %p invalid inner public key:\n%s\n", + ipair, decoded ); + goto err_asn1; + } + DBGC ( ipair, "IPAIR %p received public key\n", ipair ); + DBGC2_HDA ( ipair, 0, key->data, key->len ); + + /* Construct certificates */ + if ( ( rc = icert_certs ( &ipair->icert, key ) ) != 0 ) + goto err_certs; + + /* Send session request or pair request as applicable */ + if ( ipair->flags & IPAIR_REQUEST ) { + ipair->tx = ipair_tx_pair; + ipair->rx = ipair_rx_pair; + } else { + ipair->tx = ipair_tx_session; + ipair->rx = ipair_rx_session; + } + start_timer_nodelay ( &ipair->timer ); + + /* Free key */ + free ( key ); + + /* Free intermediate Base64 */ + free ( decoded ); + + return 0; + + err_certs: + free ( key ); + err_asn1: + err_decode: + free ( decoded ); + err_alloc: + err_tag: + return rc; +} + +/** + * Transmit Pair message + * + * @v ipair Pairing client + * @ret rc Return status code + */ +static int ipair_tx_pair ( struct ipair *ipair ) { + char *root; + char *host; + char *device; + int rc; + + /* Construct doubly encoded certificates */ + if ( ( rc = icert_encode ( &ipair->icert, ipair->icert.root, + &root ) ) != 0 ) + goto err_root; + if ( ( rc = icert_encode ( &ipair->icert, ipair->icert.host, + &host ) ) != 0 ) + goto err_host; + if ( ( rc = icert_encode ( &ipair->icert, ipair->icert.device, + &device ) ) != 0 ) + goto err_device; + + /* Transmit message */ + if ( ( rc = ipair_tx ( ipair, + "%s" + "Pair\n" + "PairRecord\n" + "\n" + "RootCertificate\n" + "%s\n" + "HostCertificate\n" + "%s\n" + "DeviceCertificate\n" + "%s\n" + "SystemBUID\n" + "%s\n" + "HostID\n" + "%s\n" + "\n" + "ProtocolVersion\n" + "2\n" + "PairingOptions\n" + "\n" + "ExtendedPairingErrors\n" + "\n" + "\n" + "%s", + ipair_prefix, root, host, device, + ipair_system_buid, ipair_host_id, + ipair_suffix + ) ) != 0 ) + goto err_tx; + + err_tx: + free ( device ); + err_device: + free ( host ); + err_host: + free ( root ); + err_root: + return rc; +} + +/** + * Receive Pair message error + * + * @v ipair Pairing client + * @v error Pairing error + * @ret rc Return status code + */ +static int ipair_rx_pair_error ( struct ipair *ipair, char *error ) { + + /* Check for actual errors */ + if ( strcmp ( error, "PairingDialogResponsePending" ) != 0 ) { + DBGC ( ipair, "IPAIR %p pairing error \"%s\"\n", ipair, error ); + return -EPERM; + } + + /* Retransmit pairing request */ + ipair->tx = ipair_tx_pair; + ipair->rx = ipair_rx_pair; + start_timer_fixed ( &ipair->timer, IPAIR_RETRY_DELAY ); + + DBGC ( ipair, "IPAIR %p waiting for pairing dialog\n", ipair ); + return 0; +} + +/** + * Receive Pair message + * + * @v ipair Pairing client + * @v msg XML message + * @ret rc Return status code + */ +static int ipair_rx_pair ( struct ipair *ipair, char *msg ) { + char *error; + char *escrow; + char *end; + int rc; + + /* Check for pairing errors */ + if ( ( rc = ipair_key ( ipair, msg, "Error", "string", &error, + &end ) ) == 0 ) { + *end = '\0'; + return ipair_rx_pair_error ( ipair, error ); + } + + /* Get EscrowBag */ + if ( ( rc = ipair_key ( ipair, msg, "EscrowBag", "data", &escrow, + &end ) ) != 0 ) { + DBGC ( ipair, "IPAIR %p unexpected pairing 
response:\n%s\n", + ipair, msg ); + return rc; + } + DBGC ( ipair, "IPAIR %p pairing successful\n", ipair ); + + /* Send session request */ + ipair->tx = ipair_tx_session; + ipair->rx = ipair_rx_session; + start_timer_nodelay ( &ipair->timer ); + + return 0; +} + +/** + * Transmit StartSession message + * + * @v ipair Pairing client + * @ret rc Return status code + */ +static int ipair_tx_session ( struct ipair *ipair ) { + int rc; + + /* Transmit message */ + if ( ( rc = ipair_tx ( ipair, + "%s" + "StartSession\n" + "SystemBUID\n" + "%s\n" + "HostID\n" + "%s\n" + "%s", + ipair_prefix, ipair_system_buid, + ipair_host_id, ipair_suffix + ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Receive StartSession message error + * + * @v ipair Pairing client + * @v error Pairing error + * @ret rc Return status code + */ +static int ipair_rx_session_error ( struct ipair *ipair, char *error ) { + + /* Check for actual errors */ + if ( strcmp ( error, "InvalidHostID" ) != 0 ) { + DBGC ( ipair, "IPAIR %p session error \"%s\"\n", ipair, error ); + return -EPERM; + } + + /* Transmit pairing request */ + ipair->tx = ipair_tx_pair; + ipair->rx = ipair_rx_pair; + start_timer_nodelay ( &ipair->timer ); + + DBGC ( ipair, "IPAIR %p unknown host: requesting pairing\n", ipair ); + return 0; +} + +/** + * Receive StartSession message + * + * @v ipair Pairing client + * @v msg XML message + * @ret rc Return status code + */ +static int ipair_rx_session ( struct ipair *ipair, char *msg ) { + char *error; + char *session; + char *end; + int rc; + + /* Check for session errors */ + if ( ( rc = ipair_key ( ipair, msg, "Error", "string", &error, + &end ) ) == 0 ) { + *end = '\0'; + return ipair_rx_session_error ( ipair, error ); + } + + /* Check for session ID */ + if ( ( rc = ipair_key ( ipair, msg, "SessionID", "string", &session, + &end ) ) != 0 ) { + DBGC ( ipair, "IPAIR %p unexpected session response:\n%s\n", + ipair, msg ); + return rc; + } + *end = '\0'; + DBGC ( ipair, "IPAIR %p starting session \"%s\"\n", ipair, session ); + + /* Start TLS */ + if ( ( rc = add_tls ( &ipair->xfer, "iPhone", &icert_root, + ipair->icert.key ) ) != 0 ) { + DBGC ( ipair, "IPAIR %p could not start TLS: %s\n", + ipair, strerror ( rc ) ); + return rc; + } + + /* Record that TLS has been started */ + ipair->flags |= IPAIR_TLS; + + return 0; +} + +/** + * Handle window change notification + * + * @v ipair Pairing client + */ +static void ipair_window_changed ( struct ipair *ipair ) { + + /* Report pairing as complete once TLS session has been established */ + if ( ( ipair->flags & IPAIR_TLS ) && xfer_window ( &ipair->xfer ) ) { + + /* Sanity checks */ + assert ( x509_is_valid ( ipair->icert.root, &icert_root ) ); + assert ( x509_is_valid ( ipair->icert.device, &icert_root ) ); + assert ( ! x509_is_valid ( ipair->icert.root, NULL ) ); + assert ( ! x509_is_valid ( ipair->icert.host, NULL ) ); + assert ( ! x509_is_valid ( ipair->icert.device, NULL ) ); + + /* Report pairing as complete */ + DBGC ( ipair, "IPAIR %p established TLS session\n", ipair ); + ipair_close ( ipair, 0 ); + return; + } +} + +/** + * Handle received data + * + * @v ipair Pairing client + * @v iobuf I/O buffer + * @v meta Data transfer metadata + * @ret rc Return status code + */ +static int ipair_deliver ( struct ipair *ipair, struct io_buffer *iobuf, + struct xfer_metadata *meta __unused ) { + struct ipair_header *hdr; + int rc; + + /* Strip header (which may appear in a separate packet) */ + if ( ( ! 
( ipair->flags & IPAIR_RX_LEN ) ) && + ( iob_len ( iobuf ) >= sizeof ( *hdr ) ) ) { + iob_pull ( iobuf, sizeof ( *hdr ) ); + ipair->flags |= IPAIR_RX_LEN; + } + + /* Clear received header flag if we have a message */ + if ( iob_len ( iobuf ) ) + ipair->flags &= ~IPAIR_RX_LEN; + + /* Receive message */ + if ( ( rc = ipair_rx ( ipair, iobuf->data, iob_len ( iobuf ) ) ) != 0 ) + goto error; + + /* Free I/O buffer */ + free_iob ( iobuf ); + + return 0; + + error: + ipair_close ( ipair, rc ); + free_iob ( iobuf ); + return rc; +} + +/** + * Pairing transmission timer + * + * @v timer Retransmission timer + * @v over Failure indicator + */ +static void ipair_expired ( struct retry_timer *timer, int over __unused ) { + struct ipair *ipair = container_of ( timer, struct ipair, timer ); + int ( * tx ) ( struct ipair *ipair ); + int rc; + + /* Sanity check */ + tx = ipair->tx; + assert ( tx != NULL ); + + /* Clear pending transmission */ + ipair->tx = NULL; + + /* Transmit data, if applicable */ + if ( ( rc = tx ( ipair ) ) != 0 ) + ipair_close ( ipair, rc ); +} + +/** Pairing client interface operations */ +static struct interface_operation ipair_xfer_operations[] = { + INTF_OP ( xfer_deliver, struct ipair *, ipair_deliver ), + INTF_OP ( xfer_window_changed, struct ipair *, ipair_window_changed ), + INTF_OP ( intf_close, struct ipair *, ipair_close ), +}; + +/** Pairing client interface descriptor */ +static struct interface_descriptor ipair_xfer_desc = + INTF_DESC ( struct ipair, xfer, ipair_xfer_operations ); + +/** + * Create a pairing client + * + * @v xfer Data transfer interface + * @v flags Initial state flags + * @ret rc Return status code + */ +static int ipair_create ( struct interface *xfer, unsigned int flags ) { + struct ipair *ipair; + int rc; + + /* Allocate and initialise structure */ + ipair = zalloc ( sizeof ( *ipair ) ); + if ( ! ipair ) { + rc = -ENOMEM; + goto err_alloc; + } + ref_init ( &ipair->refcnt, ipair_free ); + intf_init ( &ipair->xfer, &ipair_xfer_desc, &ipair->refcnt ); + timer_init ( &ipair->timer, ipair_expired, &ipair->refcnt ); + ipair->tx = ipair_tx_pubkey; + ipair->rx = ipair_rx_pubkey; + ipair->flags = flags; + + /* Schedule initial transmission */ + start_timer_nodelay ( &ipair->timer ); + + /* Attach to parent interface, mortalise self, and return */ + intf_plug_plug ( &ipair->xfer, xfer ); + ref_put ( &ipair->refcnt ); + return 0; + + ref_put ( &ipair->refcnt ); + err_alloc: + return rc; +} + +/****************************************************************************** + * + * iPhone USB networking + * + ****************************************************************************** + */ + +/** + * Complete bulk IN transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void iphone_in_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct iphone *iphone = container_of ( ep, struct iphone, usbnet.in ); + struct net_device *netdev = iphone->netdev; + + /* Profile receive completions */ + profile_start ( &iphone_in_profiler ); + + /* Ignore packets cancelled when the endpoint closes */ + if ( ! 
ep->open ) + goto ignore; + + /* Record USB errors against the network device */ + if ( rc != 0 ) { + DBGC ( iphone, "IPHONE %p bulk IN failed: %s\n", + iphone, strerror ( rc ) ); + goto error; + } + + /* Strip padding */ + if ( iob_len ( iobuf ) < IPHONE_IN_PAD ) { + DBGC ( iphone, "IPHONE %p malformed bulk IN:\n", iphone ); + DBGC_HDA ( iphone, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EINVAL; + goto error; + } + iob_pull ( iobuf, IPHONE_IN_PAD ); + + /* Hand off to network stack */ + netdev_rx ( netdev, iob_disown ( iobuf ) ); + + profile_stop ( &iphone_in_profiler ); + return; + + error: + netdev_rx_err ( netdev, iob_disown ( iobuf ), rc ); + ignore: + free_iob ( iobuf ); +} + +/** Bulk IN endpoint operations */ +static struct usb_endpoint_driver_operations iphone_in_operations = { + .complete = iphone_in_complete, +}; + +/** + * Transmit packet + * + * @v iphone iPhone device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int iphone_out_transmit ( struct iphone *iphone, + struct io_buffer *iobuf ) { + int rc; + + /* Profile transmissions */ + profile_start ( &iphone_out_profiler ); + + /* Enqueue I/O buffer */ + if ( ( rc = usb_stream ( &iphone->usbnet.out, iobuf, 1 ) ) != 0 ) + return rc; + + profile_stop ( &iphone_out_profiler ); + return 0; +} + +/** + * Complete bulk OUT transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void iphone_out_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct iphone *iphone = container_of ( ep, struct iphone, usbnet.out ); + struct net_device *netdev = iphone->netdev; + + /* Report TX completion */ + netdev_tx_complete_err ( netdev, iobuf, rc ); +} + +/** Bulk OUT endpoint operations */ +static struct usb_endpoint_driver_operations iphone_out_operations = { + .complete = iphone_out_complete, +}; + +/** + * Check pairing status + * + * @v iphone iPhone device + * @ret rc Return status code + */ +static int iphone_check_pair ( struct iphone *iphone ) { + struct imux *imux; + + /* Find corresponding USB multiplexer */ + list_for_each_entry ( imux, &imuxes, list ) { + if ( imux->usb == iphone->usb ) + return imux->rc; + } + + return -EPIPE_NO_MUX; +} + +/** + * Check link status + * + * @v netdev Network device + */ +static void iphone_check_link ( struct net_device *netdev ) { + struct iphone *iphone = netdev->priv; + struct usb_device *usb = iphone->usb; + uint8_t status; + int rc; + + /* Check pairing status */ + if ( ( rc = iphone_check_pair ( iphone ) ) != 0 ) + goto err_pair; + + /* Get link status */ + if ( ( rc = usb_control ( usb, IPHONE_GET_LINK, 0, 0, &status, + sizeof ( status ) ) ) != 0 ) { + DBGC ( iphone, "IPHONE %p could not get link status: %s\n", + iphone, strerror ( rc ) ); + goto err_control; + } + + /* Check link status */ + if ( status != IPHONE_LINK_UP ) { + rc = -ENOTCONN_STATUS ( status ); + goto err_status; + } + + /* Success */ + rc = 0; + + err_status: + err_control: + err_pair: + /* Report link status. Since we have to check the link + * periodically (due to an absence of an interrupt endpoint), + * do this only if the link status has actually changed. 
+ */ + if ( rc != netdev->link_rc ) { + if ( rc == 0 ) { + DBGC ( iphone, "IPHONE %p link up\n", iphone ); + } else { + DBGC ( iphone, "IPHONE %p link down: %s\n", + iphone, strerror ( rc ) ); + } + netdev_link_err ( netdev, rc ); + } +} + +/** + * Periodically update link status + * + * @v timer Link status timer + * @v over Failure indicator + */ +static void iphone_expired ( struct retry_timer *timer, int over __unused ) { + struct iphone *iphone = container_of ( timer, struct iphone, timer ); + struct net_device *netdev = iphone->netdev; + + /* Check link status */ + iphone_check_link ( netdev ); + + /* Restart timer, if device is open */ + if ( netdev_is_open ( netdev ) ) + start_timer_fixed ( timer, IPHONE_LINK_CHECK_INTERVAL ); +} + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int iphone_open ( struct net_device *netdev ) { + struct iphone *iphone = netdev->priv; + int rc; + + /* Open USB network device */ + if ( ( rc = usbnet_open ( &iphone->usbnet ) ) != 0 ) { + DBGC ( iphone, "IPHONE %p could not open: %s\n", + iphone, strerror ( rc ) ); + goto err_open; + } + + /* Start the link status check timer */ + start_timer_nodelay ( &iphone->timer ); + + return 0; + + usbnet_close ( &iphone->usbnet ); + err_open: + return rc; +} + +/** + * Close network device + * + * @v netdev Network device + */ +static void iphone_close ( struct net_device *netdev ) { + struct iphone *iphone = netdev->priv; + + /* Stop the link status check timer */ + stop_timer ( &iphone->timer ); + + /* Close USB network device */ + usbnet_close ( &iphone->usbnet ); +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int iphone_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ) { + struct iphone *iphone = netdev->priv; + int rc; + + /* Transmit packet */ + if ( ( rc = iphone_out_transmit ( iphone, iobuf ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Poll for completed and received packets + * + * @v netdev Network device + */ +static void iphone_poll ( struct net_device *netdev ) { + struct iphone *iphone = netdev->priv; + int rc; + + /* Poll USB bus */ + usb_poll ( iphone->bus ); + + /* Refill endpoints */ + if ( ( rc = usbnet_refill ( &iphone->usbnet ) ) != 0 ) + netdev_rx_err ( netdev, NULL, rc ); +} + +/** iPhone network device operations */ +static struct net_device_operations iphone_operations = { + .open = iphone_open, + .close = iphone_close, + .transmit = iphone_transmit, + .poll = iphone_poll, +}; + +/** + * Probe device + * + * @v func USB function + * @v config Configuration descriptor + * @ret rc Return status code + */ +static int iphone_probe ( struct usb_function *func, + struct usb_configuration_descriptor *config ) { + struct usb_device *usb = func->usb; + struct net_device *netdev; + struct iphone *iphone; + int rc; + + /* Allocate and initialise structure */ + netdev = alloc_etherdev ( sizeof ( *iphone ) ); + if ( ! 
netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &iphone_operations ); + netdev->dev = &func->dev; + iphone = netdev->priv; + memset ( iphone, 0, sizeof ( *iphone ) ); + iphone->usb = usb; + iphone->bus = usb->port->hub->bus; + iphone->netdev = netdev; + usbnet_init ( &iphone->usbnet, func, NULL, &iphone_in_operations, + &iphone_out_operations ); + usb_refill_init ( &iphone->usbnet.in, 0, IPHONE_IN_MTU, + IPHONE_IN_MAX_FILL ); + timer_init ( &iphone->timer, iphone_expired, &netdev->refcnt ); + DBGC ( iphone, "IPHONE %p on %s\n", iphone, func->name ); + + /* Describe USB network device */ + if ( ( rc = usbnet_describe ( &iphone->usbnet, config ) ) != 0 ) { + DBGC ( iphone, "IPHONE %p could not describe: %s\n", + iphone, strerror ( rc ) ); + goto err_describe; + } + + /* Fetch MAC address */ + if ( ( rc = usb_control ( usb, IPHONE_GET_MAC, 0, 0, netdev->hw_addr, + ETH_ALEN ) ) != 0 ) { + DBGC ( iphone, "IPHONE %p could not fetch MAC address: %s\n", + iphone, strerror ( rc ) ); + goto err_fetch_mac; + } + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register; + + /* Set initial link status */ + iphone_check_link ( netdev ); + + /* Add to list of iPhone network devices */ + list_add ( &iphone->list, &iphones ); + + usb_func_set_drvdata ( func, iphone ); + return 0; + + list_del ( &iphone->list ); + unregister_netdev ( netdev ); + err_register: + err_fetch_mac: + err_describe: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return rc; +} + +/** + * Remove device + * + * @v func USB function + */ +static void iphone_remove ( struct usb_function *func ) { + struct iphone *iphone = usb_func_get_drvdata ( func ); + struct net_device *netdev = iphone->netdev; + + list_del ( &iphone->list ); + unregister_netdev ( netdev ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** iPhone device IDs */ +static struct usb_device_id iphone_ids[] = { + { + .name = "iphone", + .vendor = 0x05ac, + .product = USB_ANY_ID, + }, +}; + +/** iPhone driver */ +struct usb_driver iphone_driver __usb_driver = { + .ids = iphone_ids, + .id_count = ( sizeof ( iphone_ids ) / sizeof ( iphone_ids[0] ) ), + .class = USB_CLASS_ID ( 0xff, 0xfd, 0x01 ), + .score = USB_SCORE_NORMAL, + .probe = iphone_probe, + .remove = iphone_remove, +}; + +/* Drag in objects via iphone_driver */ +REQUIRING_SYMBOL ( iphone_driver ); + +/* Drag in RSA-with-SHA256 OID prefixes */ +REQUIRE_OBJECT ( rsa_sha256 ); diff --git a/src/drivers/net/iphone.h b/src/drivers/net/iphone.h new file mode 100644 index 000000000..2db6da7bd --- /dev/null +++ b/src/drivers/net/iphone.h @@ -0,0 +1,291 @@ +#ifndef _IPHONE_H +#define _IPHONE_H + +/** @file + * + * iPhone USB Ethernet driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/****************************************************************************** + * + * iPhone pairing certificates + * + ****************************************************************************** + */ + +/** An iPhone pairing certificate set */ +struct icert { + /** "Private" key */ + struct private_key *key; + /** Root certificate */ + struct x509_certificate *root; + /** Host certificate */ + struct x509_certificate *host; + /** Device certificate */ + struct x509_certificate *device; +}; + +/****************************************************************************** + * + * iPhone USB multiplexer + * + 
****************************************************************************** + */ + +/** An iPhone USB multiplexed packet header */ +struct imux_header { + /** Protocol */ + uint32_t protocol; + /** Length (including this header) */ + uint32_t len; + /** Reserved */ + uint32_t reserved; + /** Output sequence number */ + uint16_t out_seq; + /** Input sequence number */ + uint16_t in_seq; +} __attribute__ (( packed )); + +/** iPhone USB multiplexer protocols */ +enum imux_protocol { + /** Version number */ + IMUX_VERSION = 0, + /** Log message */ + IMUX_LOG = 1, + /** TCP packet */ + IMUX_TCP = IP_TCP, +}; + +/** An iPhone USB multiplexed version message header */ +struct imux_header_version { + /** Multiplexed packet header */ + struct imux_header hdr; + /** Reserved */ + uint32_t reserved; +} __attribute__ (( packed )); + +/** An iPhone USB multiplexed log message header */ +struct imux_header_log { + /** Multiplexed packet header */ + struct imux_header hdr; + /** Log level */ + uint8_t level; + /** Message */ + char msg[0]; +} __attribute__ (( packed )); + +/** An iPhone USB multiplexed pseudo-TCP message header */ +struct imux_header_tcp { + /** Multiplexed packet header */ + struct imux_header hdr; + /** Pseudo-TCP header */ + struct tcp_header tcp; +} __attribute__ (( packed )); + +/** Local port number + * + * This is a policy decision. + */ +#define IMUX_PORT_LOCAL 0x18ae + +/** Lockdown daemon port number */ +#define IMUX_PORT_LOCKDOWND 62078 + +/** Advertised TCP window + * + * This is a policy decision. + */ +#define IMUX_WINDOW 0x0200 + +/** An iPhone USB multiplexer */ +struct imux { + /** Reference counter */ + struct refcnt refcnt; + /** USB device */ + struct usb_device *usb; + /** USB bus */ + struct usb_bus *bus; + /** USB network device */ + struct usbnet_device usbnet; + /** List of USB multiplexers */ + struct list_head list; + + /** Polling process */ + struct process process; + /** Pending action + * + * @v imux USB multiplexer + * @ret rc Return status code + */ + int ( * action ) ( struct imux *imux ); + + /** Input sequence */ + uint16_t in_seq; + /** Output sequence */ + uint16_t out_seq; + /** Pseudo-TCP sequence number */ + uint32_t tcp_seq; + /** Pseudo-TCP acknowledgement number */ + uint32_t tcp_ack; + /** Pseudo-TCP local port number */ + uint16_t port; + + /** Pseudo-TCP lockdown socket interface */ + struct interface tcp; + /** Pairing flags */ + unsigned int flags; + /** Pairing status */ + int rc; +}; + +/** Multiplexer bulk IN maximum fill level + * + * This is a policy decision. + */ +#define IMUX_IN_MAX_FILL 1 + +/** Multiplexer bulk IN buffer size + * + * This is a policy decision. 
+ */ +#define IMUX_IN_MTU 4096 + +/****************************************************************************** + * + * iPhone pairing client + * + ****************************************************************************** + */ + +/** An iPhone USB multiplexed pseudo-TCP XML message header */ +struct ipair_header { + /** Message length */ + uint32_t len; + /** Message */ + char msg[0]; +} __attribute__ (( packed )); + +/** An iPhone pairing client */ +struct ipair { + /** Reference counter */ + struct refcnt refcnt; + /** Data transfer interface */ + struct interface xfer; + + /** Pairing timer */ + struct retry_timer timer; + /** Transmit message + * + * @v ipair Pairing client + * @ret rc Return status code + */ + int ( * tx ) ( struct ipair *ipair ); + /** Receive message + * + * @v ipair Pairing client + * @v msg XML message + * @ret rc Return status code + */ + int ( * rx ) ( struct ipair *ipair, char *msg ); + /** State flags */ + unsigned int flags; + + /** Pairing certificates */ + struct icert icert; +}; + +/** Pairing client state flags */ +enum ipair_flags { + /** Request a new pairing */ + IPAIR_REQUEST = 0x0001, + /** Standalone length has been received */ + IPAIR_RX_LEN = 0x0002, + /** TLS session has been started */ + IPAIR_TLS = 0x0004, +}; + +/** Pairing retry delay + * + * This is a policy decision. + */ +#define IPAIR_RETRY_DELAY ( 1 * TICKS_PER_SEC ) + +/****************************************************************************** + * + * iPhone USB networking + * + ****************************************************************************** + */ + +/** Get MAC address */ +#define IPHONE_GET_MAC \ + ( USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE | \ + USB_REQUEST_TYPE ( 0x00 ) ) + +/** Get link status */ +#define IPHONE_GET_LINK \ + ( USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE | \ + USB_REQUEST_TYPE ( 0x45 ) ) + +/** An iPhone link status */ +enum iphone_link_status { + /** Personal Hotspot is disabled */ + IPHONE_LINK_DISABLED = 0x03, + /** Link up */ + IPHONE_LINK_UP = 0x04, + /** Link not yet determined */ + IPHONE_LINK_UNKNOWN = -1U, +}; + +/** An iPhone network device */ +struct iphone { + /** USB device */ + struct usb_device *usb; + /** USB bus */ + struct usb_bus *bus; + /** Network device */ + struct net_device *netdev; + /** USB network device */ + struct usbnet_device usbnet; + + /** List of iPhone network devices */ + struct list_head list; + /** Link status check timer */ + struct retry_timer timer; +}; + +/** Bulk IN padding */ +#define IPHONE_IN_PAD 2 + +/** Bulk IN buffer size + * + * This is a policy decision. + */ +#define IPHONE_IN_MTU ( ETH_FRAME_LEN + IPHONE_IN_PAD ) + +/** Bulk IN maximum fill level + * + * This is a policy decision. + */ +#define IPHONE_IN_MAX_FILL 8 + +/** Link check interval + * + * This is a policy decision. 
+ */ +#define IPHONE_LINK_CHECK_INTERVAL ( 5 * TICKS_PER_SEC ) + +#endif /* _IPHONE_H */ diff --git a/src/drivers/net/jme.c b/src/drivers/net/jme.c index 29694b699..c7307728d 100644 --- a/src/drivers/net/jme.c +++ b/src/drivers/net/jme.c @@ -262,7 +262,7 @@ jme_free_tx_resources(struct jme_adapter *jme) sizeof(struct io_buffer *) * jme->tx_ring_size); free(txring->bufinf); } - free_dma(txring->desc, jme->tx_ring_size * TX_DESC_SIZE); + free_phys(txring->desc, jme->tx_ring_size * TX_DESC_SIZE); txring->desc = NULL; txring->dma = 0; txring->bufinf = NULL; @@ -277,7 +277,7 @@ jme_alloc_tx_resources(struct jme_adapter *jme) { struct jme_ring *txring = &jme->txring; - txring->desc = malloc_dma(jme->tx_ring_size * TX_DESC_SIZE, + txring->desc = malloc_phys(jme->tx_ring_size * TX_DESC_SIZE, RING_DESC_ALIGN); if (!txring->desc) { DBG("Can not allocate transmit ring descriptors.\n"); @@ -442,7 +442,7 @@ jme_free_rx_resources(struct jme_adapter *jme) free(rxring->bufinf); } - free_dma(rxring->desc, jme->rx_ring_size * RX_DESC_SIZE); + free_phys(rxring->desc, jme->rx_ring_size * RX_DESC_SIZE); rxring->desc = NULL; rxring->dma = 0; rxring->bufinf = NULL; @@ -458,7 +458,7 @@ jme_alloc_rx_resources(struct jme_adapter *jme) struct jme_ring *rxring = &jme->rxring; struct io_buffer **bufinf; - rxring->desc = malloc_dma(jme->rx_ring_size * RX_DESC_SIZE, + rxring->desc = malloc_phys(jme->rx_ring_size * RX_DESC_SIZE, RING_DESC_ALIGN); if (!rxring->desc) { DBG("Can not allocate receive ring descriptors.\n"); @@ -1191,7 +1191,7 @@ jme_probe(struct pci_device *pci) jme = netdev->priv; pci_set_drvdata(pci, netdev); netdev->dev = &pci->dev; - jme->regs = ioremap(pci->membase, JME_REGS_SIZE); + jme->regs = pci_ioremap(pci, pci->membase, JME_REGS_SIZE); if (!(jme->regs)) { DBG("Mapping PCI resource region error.\n"); rc = -ENOMEM; diff --git a/src/drivers/net/myri10ge.c b/src/drivers/net/myri10ge.c index ae6b6c21e..6d0f723f2 100644 --- a/src/drivers/net/myri10ge.c +++ b/src/drivers/net/myri10ge.c @@ -66,7 +66,7 @@ FILE_LICENCE ( GPL2_ONLY ); /* * Debugging levels: - * - DBG() is for any errors, i.e. failed alloc_iob(), malloc_dma(), + * - DBG() is for any errors, i.e. failed alloc_iob(), malloc_phys(), * TX overflow, corrupted packets, ... * - DBG2() is for successful events, like packet received, * packet transmitted, and other general notifications. @@ -918,7 +918,7 @@ static void myri10ge_net_close ( struct net_device *netdev ) /* Release DMAable memory. */ - free_dma ( priv->dma, sizeof ( *priv->dma ) ); + free_phys ( priv->dma, sizeof ( *priv->dma ) ); /* Erase all state from the open. */ @@ -988,7 +988,7 @@ static int myri10ge_net_open ( struct net_device *netdev ) /* Allocate cleared DMAable buffers. */ - priv->dma = malloc_dma ( sizeof ( *priv->dma ) , 128 ); + priv->dma = malloc_phys ( sizeof ( *priv->dma ) , 128 ); if ( !priv->dma ) { rc = -ENOMEM; dbg = "DMA"; @@ -1152,7 +1152,7 @@ abort_with_receives_posted: free_iob ( priv->receive_iob[priv->receives_posted] ); abort_with_dma: /* Because the link is not up, we don't have to reset the NIC here. */ - free_dma ( priv->dma, sizeof ( *priv->dma ) ); + free_phys ( priv->dma, sizeof ( *priv->dma ) ); abort_with_nothing: /* Erase all signs of the failed open. 
*/ memset ( priv, 0, sizeof ( *priv ) ); diff --git a/src/drivers/net/myson.c b/src/drivers/net/myson.c index 84a550596..4ab2bf345 100644 --- a/src/drivers/net/myson.c +++ b/src/drivers/net/myson.c @@ -165,7 +165,7 @@ static int myson_create_ring ( struct myson_nic *myson, int rc; /* Allocate descriptor ring */ - ring->desc = malloc_dma ( len, MYSON_RING_ALIGN ); + ring->desc = malloc_phys ( len, MYSON_RING_ALIGN ); if ( ! ring->desc ) { rc = -ENOMEM; goto err_alloc; @@ -197,7 +197,7 @@ static int myson_create_ring ( struct myson_nic *myson, return 0; err_64bit: - free_dma ( ring->desc, len ); + free_phys ( ring->desc, len ); ring->desc = NULL; err_alloc: return rc; @@ -217,7 +217,7 @@ static void myson_destroy_ring ( struct myson_nic *myson, writel ( 0, myson->regs + ring->reg ); /* Free descriptor ring */ - free_dma ( ring->desc, len ); + free_phys ( ring->desc, len ); ring->desc = NULL; ring->prod = 0; ring->cons = 0; @@ -606,7 +606,7 @@ static int myson_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map registers */ - myson->regs = ioremap ( pci->membase, MYSON_BAR_SIZE ); + myson->regs = pci_ioremap ( pci, pci->membase, MYSON_BAR_SIZE ); if ( ! myson->regs ) { rc = -ENODEV; goto err_ioremap; diff --git a/src/drivers/net/natsemi.c b/src/drivers/net/natsemi.c index 9f2c3029c..ba99bc2fe 100644 --- a/src/drivers/net/natsemi.c +++ b/src/drivers/net/natsemi.c @@ -408,7 +408,7 @@ static int natsemi_create_ring ( struct natsemi_nic *natsemi, * ensure that it can't possibly cross the boundary of 32-bit * address space. */ - ring->desc = malloc_dma ( len, len ); + ring->desc = malloc_phys ( len, len ); if ( ! ring->desc ) { rc = -ENOMEM; goto err_alloc; @@ -454,7 +454,7 @@ static int natsemi_create_ring ( struct natsemi_nic *natsemi, return 0; err_64bit: - free_dma ( ring->desc, len ); + free_phys ( ring->desc, len ); ring->desc = NULL; err_alloc: return rc; @@ -476,7 +476,7 @@ static void natsemi_destroy_ring ( struct natsemi_nic *natsemi, writel ( 0, natsemi->regs + ring->reg + 4 ); /* Free descriptor ring */ - free_dma ( ring->desc, len ); + free_phys ( ring->desc, len ); ring->desc = NULL; ring->prod = 0; ring->cons = 0; @@ -853,7 +853,7 @@ static int natsemi_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map registers */ - natsemi->regs = ioremap ( pci->membase, NATSEMI_BAR_SIZE ); + natsemi->regs = pci_ioremap ( pci, pci->membase, NATSEMI_BAR_SIZE ); if ( ! natsemi->regs ) { rc = -ENODEV; goto err_ioremap; diff --git a/src/drivers/net/ncm.c b/src/drivers/net/ncm.c index 1837291f7..cc07a4388 100644 --- a/src/drivers/net/ncm.c +++ b/src/drivers/net/ncm.c @@ -558,6 +558,8 @@ static int ncm_probe ( struct usb_function *func, struct usb_interface_descriptor *comms; struct ecm_ethernet_descriptor *ethernet; struct ncm_ntb_parameters params; + unsigned int remainder; + unsigned int divisor; int rc; /* Allocate and initialise structure */ @@ -616,14 +618,15 @@ static int ncm_probe ( struct usb_function *func, DBGC2 ( ncm, "NCM %p maximum IN size is %zd bytes\n", ncm, ncm->mtu ); /* Calculate transmit padding */ - ncm->padding = ( ( le16_to_cpu ( params.out.remainder ) - - sizeof ( struct ncm_ntb_header ) - ETH_HLEN ) & - ( le16_to_cpu ( params.out.divisor ) - 1 ) ); + divisor = ( params.out.divisor ? 
+ le16_to_cpu ( params.out.divisor ) : 1 ); + remainder = le16_to_cpu ( params.out.remainder ); + ncm->padding = ( ( remainder - sizeof ( struct ncm_ntb_header ) - + ETH_HLEN ) & ( divisor - 1 ) ); DBGC2 ( ncm, "NCM %p using %zd-byte transmit padding\n", ncm, ncm->padding ); assert ( ( ( sizeof ( struct ncm_ntb_header ) + ncm->padding + - ETH_HLEN ) % le16_to_cpu ( params.out.divisor ) ) == - le16_to_cpu ( params.out.remainder ) ); + ETH_HLEN ) % divisor ) == remainder ); /* Register network device */ if ( ( rc = register_netdev ( netdev ) ) != 0 ) diff --git a/src/drivers/net/netfront.c b/src/drivers/net/netfront.c index b6205542b..1203e585c 100644 --- a/src/drivers/net/netfront.c +++ b/src/drivers/net/netfront.c @@ -56,7 +56,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); __einfo_uniqify ( EINFO_EIO, -NETIF_RSP_DROPPED, \ "Packet dropped" ) #define EIO_NETIF_RSP( status ) \ - EUNIQ ( EINFO_EIO, -(status), \ + EUNIQ ( EINFO_EIO, ( -(status) & 0x1f ), \ EIO_NETIF_RSP_ERROR, EIO_NETIF_RSP_DROPPED ) /****************************************************************************** @@ -326,6 +326,7 @@ static int netfront_create_ring ( struct netfront_nic *netfront, struct netfront_ring *ring ) { struct xen_device *xendev = netfront->xendev; struct xen_hypervisor *xen = xendev->xen; + physaddr_t addr; unsigned int i; int rc; @@ -338,18 +339,18 @@ static int netfront_create_ring ( struct netfront_nic *netfront, ring->id_cons = 0; /* Allocate and initialise shared ring */ - ring->sring.raw = malloc_dma ( PAGE_SIZE, PAGE_SIZE ); + ring->sring.raw = malloc_phys ( PAGE_SIZE, PAGE_SIZE ); if ( ! ring->sring.raw ) { rc = -ENOMEM; goto err_alloc; } /* Grant access to shared ring */ + addr = virt_to_phys ( ring->sring.raw ); if ( ( rc = xengrant_permit_access ( xen, ring->ref, xendev->backend_id, - 0, ring->sring.raw ) ) != 0 ) { + 0, addr ) ) != 0 ) { DBGC ( netfront, "NETFRONT %s could not permit access to " - "%#08lx: %s\n", xendev->key, - virt_to_phys ( ring->sring.raw ), strerror ( rc ) ); + "%#08lx: %s\n", xendev->key, addr, strerror ( rc ) ); goto err_permit_access; } @@ -358,17 +359,15 @@ static int netfront_create_ring ( struct netfront_nic *netfront, ring->ref ) ) != 0 ) goto err_write_num; - DBGC ( netfront, "NETFRONT %s %s=\"%d\" [%08lx,%08lx)\n", - xendev->key, ring->ref_key, ring->ref, - virt_to_phys ( ring->sring.raw ), - ( virt_to_phys ( ring->sring.raw ) + PAGE_SIZE ) ); + DBGC ( netfront, "NETFRONT %s %s=\"%d\" [%08lx,%08lx)\n", xendev->key, + ring->ref_key, ring->ref, addr, ( addr + PAGE_SIZE ) ); return 0; netfront_rm ( netfront, ring->ref_key ); err_write_num: xengrant_invalidate ( xen, ring->ref ); err_permit_access: - free_dma ( ring->sring.raw, PAGE_SIZE ); + free_phys ( ring->sring.raw, PAGE_SIZE ); err_alloc: return rc; } @@ -378,7 +377,8 @@ static int netfront_create_ring ( struct netfront_nic *netfront, * * @v netfront Netfront device * @v ring Descriptor ring - * @v iobuf I/O buffer + * @v addr Physical address + * @v iobuf Associated I/O buffer, or NULL * @v id Buffer ID to fill in * @v ref Grant reference to fill in * @ret rc Return status code @@ -387,8 +387,9 @@ static int netfront_create_ring ( struct netfront_nic *netfront, * ring. 
*/ static int netfront_push ( struct netfront_nic *netfront, - struct netfront_ring *ring, struct io_buffer *iobuf, - uint16_t *id, grant_ref_t *ref ) { + struct netfront_ring *ring, physaddr_t addr, + struct io_buffer *iobuf, uint16_t *id, + grant_ref_t *ref ) { struct xen_device *xendev = netfront->xendev; struct xen_hypervisor *xen = xendev->xen; unsigned int next_id; @@ -402,19 +403,15 @@ static int netfront_push ( struct netfront_nic *netfront, next_id = ring->ids[ ring->id_prod & ( ring->count - 1 ) ]; next_ref = ring->refs[next_id]; - /* Grant access to I/O buffer page. I/O buffers are naturally - * aligned, so we never need to worry about crossing a page - * boundary. - */ + /* Grant access to page containing address */ if ( ( rc = xengrant_permit_access ( xen, next_ref, xendev->backend_id, - 0, iobuf->data ) ) != 0 ) { + 0, addr ) ) != 0 ) { DBGC ( netfront, "NETFRONT %s could not permit access to " - "%#08lx: %s\n", xendev->key, - virt_to_phys ( iobuf->data ), strerror ( rc ) ); + "%#08lx: %s\n", xendev->key, addr, strerror ( rc ) ); return rc; } - /* Store I/O buffer */ + /* Store associated I/O buffer, if any */ assert ( ring->iobufs[next_id] == NULL ); ring->iobufs[next_id] = iobuf; @@ -434,7 +431,7 @@ static int netfront_push ( struct netfront_nic *netfront, * @v netfront Netfront device * @v ring Descriptor ring * @v id Buffer ID - * @ret iobuf I/O buffer + * @ret iobuf Associated I/O buffer, if any */ static struct io_buffer * netfront_pull ( struct netfront_nic *netfront, struct netfront_ring *ring, @@ -451,7 +448,6 @@ static struct io_buffer * netfront_pull ( struct netfront_nic *netfront, /* Retrieve I/O buffer */ iobuf = ring->iobufs[id]; - assert ( iobuf != NULL ); ring->iobufs[id] = NULL; /* Free buffer ID */ @@ -490,10 +486,26 @@ static void netfront_destroy_ring ( struct netfront_nic *netfront, xengrant_invalidate ( xen, ring->ref ); /* Free page */ - free_dma ( ring->sring.raw, PAGE_SIZE ); + free_phys ( ring->sring.raw, PAGE_SIZE ); ring->sring.raw = NULL; } +/** + * Discard partially received I/O buffers + * + * @v netfront Netfront device + */ +static void netfront_discard ( struct netfront_nic *netfront ) { + struct io_buffer *iobuf; + struct io_buffer *tmp; + + /* Discard all buffers in the list */ + list_for_each_entry_safe ( iobuf, tmp, &netfront->rx_partial, list ) { + list_del ( &iobuf->list ); + free_iob ( iobuf ); + } +} + /****************************************************************************** * * Network device interface @@ -512,6 +524,7 @@ static void netfront_refill_rx ( struct net_device *netdev ) { struct io_buffer *iobuf; struct netif_rx_request *request; unsigned int refilled = 0; + physaddr_t addr; int notify; int rc; @@ -524,24 +537,24 @@ static void netfront_refill_rx ( struct net_device *netdev ) { /* Wait for next refill */ break; } + addr = virt_to_phys ( iobuf->data ); /* Add to descriptor ring */ request = RING_GET_REQUEST ( &netfront->rx_fring, netfront->rx_fring.req_prod_pvt ); - if ( ( rc = netfront_push ( netfront, &netfront->rx, + if ( ( rc = netfront_push ( netfront, &netfront->rx, addr, iobuf, &request->id, &request->gref ) ) != 0 ) { netdev_rx_err ( netdev, iobuf, rc ); break; } DBGC2 ( netfront, "NETFRONT %s RX id %d ref %d is %#08lx+%zx\n", - xendev->key, request->id, request->gref, - virt_to_phys ( iobuf->data ), iob_tailroom ( iobuf ) ); + xendev->key, request->id, request->gref, addr, + iob_tailroom ( iobuf ) ); /* Move to next descriptor */ netfront->rx_fring.req_prod_pvt++; refilled++; - } /* Push new descriptors and 
notify backend if applicable */ @@ -593,6 +606,10 @@ static int netfront_open ( struct net_device *netdev ) { if ( ( rc = netfront_write_flag ( netfront, "request-rx-copy" ) ) != 0 ) goto err_request_rx_copy; + /* Inform backend that we can support scatter-gather */ + if ( ( rc = netfront_write_flag ( netfront, "feature-sg" ) ) != 0 ) + goto err_feature_sg; + /* Disable checksum offload, since we will always do the work anyway */ if ( ( rc = netfront_write_flag ( netfront, "feature-no-csum-offload" ) ) != 0 ) @@ -632,6 +649,8 @@ static int netfront_open ( struct net_device *netdev ) { err_feature_rx_notify: netfront_rm ( netfront, "feature-no-csum-offload" ); err_feature_no_csum_offload: + netfront_rm ( netfront, "feature-sg" ); + err_feature_sg: netfront_rm ( netfront, "request-rx-copy" ); err_request_rx_copy: netfront_destroy_event ( netfront ); @@ -675,11 +694,15 @@ static void netfront_close ( struct net_device *netdev ) { /* Delete flags */ netfront_rm ( netfront, "feature-rx-notify" ); netfront_rm ( netfront, "feature-no-csum-offload" ); + netfront_rm ( netfront, "feature-sg" ); netfront_rm ( netfront, "request-rx-copy" ); /* Destroy event channel */ netfront_destroy_event ( netfront ); + /* Discard any partially received I/O buffers */ + netfront_discard ( netfront ); + /* Destroy receive descriptor ring, freeing any outstanding * I/O buffers. */ @@ -703,34 +726,66 @@ static int netfront_transmit ( struct net_device *netdev, struct netfront_nic *netfront = netdev->priv; struct xen_device *xendev = netfront->xendev; struct netif_tx_request *request; + physaddr_t addr; + size_t len; + size_t remaining; + size_t frag_len; + unsigned int offset; + unsigned int count; + unsigned int more; int notify; int rc; + /* Calculate number of page buffers required */ + addr = virt_to_phys ( iobuf->data ); + len = iob_len ( iobuf ); + offset = ( addr & ( PAGE_SIZE - 1 ) ); + count = ( ( offset + len + PAGE_SIZE - 1 ) / PAGE_SIZE ); + /* Check that we have space in the ring */ - if ( netfront_ring_is_full ( &netfront->tx ) ) { + if ( netfront_ring_space ( &netfront->tx ) < count ) { DBGC ( netfront, "NETFRONT %s out of transmit descriptors\n", xendev->key ); return -ENOBUFS; } /* Add to descriptor ring */ - request = RING_GET_REQUEST ( &netfront->tx_fring, - netfront->tx_fring.req_prod_pvt ); - if ( ( rc = netfront_push ( netfront, &netfront->tx, iobuf, - &request->id, &request->gref ) ) != 0 ) { - return rc; + remaining = len; + while ( remaining ) { + + /* Calculate length of this fragment */ + frag_len = ( PAGE_SIZE - offset ); + if ( frag_len >= remaining ) { + frag_len = remaining; + more = 0; + } else { + more = NETTXF_more_data; + } + + /* Populate request */ + request = RING_GET_REQUEST ( &netfront->tx_fring, + netfront->tx_fring.req_prod_pvt ); + if ( ( rc = netfront_push ( netfront, &netfront->tx, addr, + ( more ? NULL : iobuf ), + &request->id, + &request->gref ) ) != 0 ) { + return rc; + } + request->flags = ( NETTXF_data_validated | more ); + request->offset = offset; + request->size = ( ( remaining == len ) ? len : frag_len ); + DBGC2 ( netfront, "NETFRONT %s TX id %d ref %d is " + "%#08lx+%zx%s\n", xendev->key, request->id, + request->gref, addr, frag_len, ( more ? "..." 
: "" ) ); + + /* Move to next descriptor */ + netfront->tx_fring.req_prod_pvt++; + addr += frag_len; + remaining -= frag_len; + offset = 0; } - request->offset = ( virt_to_phys ( iobuf->data ) & ( PAGE_SIZE - 1 ) ); - request->flags = NETTXF_data_validated; - request->size = iob_len ( iobuf ); - DBGC2 ( netfront, "NETFRONT %s TX id %d ref %d is %#08lx+%zx\n", - xendev->key, request->id, request->gref, - virt_to_phys ( iobuf->data ), iob_len ( iobuf ) ); - /* Consume descriptor */ - netfront->tx_fring.req_prod_pvt++; - - /* Push new descriptor and notify backend if applicable */ + /* Push new descriptors and notify backend if applicable */ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY ( &netfront->tx_fring, notify ); if ( notify ) netfront_send_event ( netfront ); @@ -748,7 +803,7 @@ static void netfront_poll_tx ( struct net_device *netdev ) { struct xen_device *xendev = netfront->xendev; struct netif_tx_response *response; struct io_buffer *iobuf; - unsigned int status; + int status; int rc; /* Consume any unconsumed responses */ @@ -761,10 +816,11 @@ static void netfront_poll_tx ( struct net_device *netdev ) { /* Retrieve from descriptor ring */ iobuf = netfront_pull ( netfront, &netfront->tx, response->id ); status = response->status; - if ( status == NETIF_RSP_OKAY ) { + if ( status >= NETIF_RSP_OKAY ) { DBGC2 ( netfront, "NETFRONT %s TX id %d complete\n", xendev->key, response->id ); - netdev_tx_complete ( netdev, iobuf ); + if ( iobuf ) + netdev_tx_complete ( netdev, iobuf ); } else { rc = -EIO_NETIF_RSP ( status ); DBGC2 ( netfront, "NETFRONT %s TX id %d error %d: %s\n", @@ -786,6 +842,7 @@ static void netfront_poll_rx ( struct net_device *netdev ) { struct netif_rx_response *response; struct io_buffer *iobuf; int status; + int more; size_t len; int rc; @@ -799,21 +856,45 @@ static void netfront_poll_rx ( struct net_device *netdev ) { /* Retrieve from descriptor ring */ iobuf = netfront_pull ( netfront, &netfront->rx, response->id ); status = response->status; - if ( status >= 0 ) { - len = status; - iob_reserve ( iobuf, response->offset ); - iob_put ( iobuf, len ); - DBGC2 ( netfront, "NETFRONT %s RX id %d complete " - "%#08lx+%zx\n", xendev->key, response->id, - virt_to_phys ( iobuf->data ), len ); - netdev_rx ( netdev, iobuf ); - } else { + more = ( response->flags & NETRXF_more_data ); + + /* Report errors */ + if ( status < 0 ) { rc = -EIO_NETIF_RSP ( status ); DBGC2 ( netfront, "NETFRONT %s RX id %d error %d: %s\n", xendev->key, response->id, status, strerror ( rc ) ); + netfront_discard ( netfront ); netdev_rx_err ( netdev, iobuf, rc ); + continue; } + + /* Add to partial receive list */ + len = status; + iob_reserve ( iobuf, response->offset ); + iob_put ( iobuf, len ); + DBGC2 ( netfront, "NETFRONT %s RX id %d complete " + "%#08lx+%zx%s\n", xendev->key, response->id, + virt_to_phys ( iobuf->data ), len, + ( more ? "..." : "" ) ); + list_add_tail ( &iobuf->list, &netfront->rx_partial ); + + /* Wait until complete packet has been received */ + if ( more ) + continue; + + /* Reassemble complete packet */ + iobuf = iob_concatenate ( &netfront->rx_partial ); + if ( ! 
iobuf ) { + DBGC2 ( netfront, "NETFRONT %s RX reassembly failed\n", + xendev->key ); + netfront_discard ( netfront ); + netdev_rx_err ( netdev, NULL, -ENOMEM ); + continue; + } + + /* Hand off to network stack */ + netdev_rx ( netdev, iobuf ); } } @@ -871,6 +952,7 @@ static int netfront_probe ( struct xen_device *xendev ) { netdev->dev = &xendev->dev; netfront = netdev->priv; netfront->xendev = xendev; + INIT_LIST_HEAD ( &netfront->rx_partial ); DBGC ( netfront, "NETFRONT %s backend=\"%s\" in domain %ld\n", xendev->key, xendev->backend, xendev->backend_id ); diff --git a/src/drivers/net/netfront.h b/src/drivers/net/netfront.h index c95ed2645..dca3ff1c5 100644 --- a/src/drivers/net/netfront.h +++ b/src/drivers/net/netfront.h @@ -65,7 +65,7 @@ struct netfront_ring { size_t count; /** I/O buffers, indexed by buffer ID */ struct io_buffer **iobufs; - /** I/O buffer grant references, indexed by buffer ID */ + /** Grant references, indexed by buffer ID */ grant_ref_t *refs; /** Buffer ID ring */ @@ -116,6 +116,18 @@ netfront_ring_fill ( struct netfront_ring *ring ) { return fill_level; } +/** + * Calculate descriptor ring remaining space + * + * @v ring Descriptor ring + * @v space Number of unused entries + */ +static inline __attribute__ (( always_inline )) unsigned int +netfront_ring_space ( struct netfront_ring *ring ) { + + return ( ring->count - netfront_ring_fill ( ring ) ); +} + /** * Check whether or not descriptor ring is full * @@ -164,6 +176,8 @@ struct netfront_nic { struct io_buffer *rx_iobufs[NETFRONT_NUM_RX_DESC]; /** Receive I/O buffer IDs */ uint8_t rx_ids[NETFRONT_NUM_RX_DESC]; + /** Partial receive I/O buffer list */ + struct list_head rx_partial; /** Event channel */ struct evtchn_send event; diff --git a/src/drivers/net/pcnet32.c b/src/drivers/net/pcnet32.c index 2635aaca2..c0dea86a8 100644 --- a/src/drivers/net/pcnet32.c +++ b/src/drivers/net/pcnet32.c @@ -246,7 +246,7 @@ pcnet32_setup_rx_resources ( struct pcnet32_private *priv ) { DBGP ( "pcnet32_setup_rx_resources\n" ); - priv->rx_base = malloc_dma ( RX_RING_BYTES, RX_RING_ALIGN ); + priv->rx_base = malloc_phys ( RX_RING_BYTES, RX_RING_ALIGN ); DBG ( "priv->rx_base = %#08lx\n", virt_to_bus ( priv->rx_base ) ); @@ -270,7 +270,7 @@ pcnet32_free_rx_resources ( struct pcnet32_private *priv ) DBGP ( "pcnet32_free_rx_resources\n" ); - free_dma ( priv->rx_base, RX_RING_BYTES ); + free_phys ( priv->rx_base, RX_RING_BYTES ); for ( i = 0; i < RX_RING_SIZE; i++ ) { free_iob ( priv->rx_iobuf[i] ); @@ -290,7 +290,7 @@ pcnet32_setup_tx_resources ( struct pcnet32_private *priv ) { DBGP ( "pcnet32_setup_tx_resources\n" ); - priv->tx_base = malloc_dma ( TX_RING_BYTES, TX_RING_ALIGN ); + priv->tx_base = malloc_phys ( TX_RING_BYTES, TX_RING_ALIGN ); if ( ! priv->tx_base ) { return -ENOMEM; @@ -312,7 +312,7 @@ pcnet32_free_tx_resources ( struct pcnet32_private *priv ) { DBGP ( "pcnet32_free_tx_resources\n" ); - free_dma ( priv->tx_base, TX_RING_BYTES ); + free_phys ( priv->tx_base, TX_RING_BYTES ); } static int diff --git a/src/drivers/net/phantom/phantom.c b/src/drivers/net/phantom/phantom.c index 781049ff4..843459059 100644 --- a/src/drivers/net/phantom/phantom.c +++ b/src/drivers/net/phantom/phantom.c @@ -640,7 +640,7 @@ static int phantom_create_rx_ctx ( struct phantom_nic *phantom ) { int rc; /* Allocate context creation buffer */ - buf = malloc_dma ( sizeof ( *buf ), UNM_DMA_BUFFER_ALIGN ); + buf = malloc_phys ( sizeof ( *buf ), UNM_DMA_BUFFER_ALIGN ); if ( ! 
buf ) { rc = -ENOMEM; goto out; @@ -716,7 +716,7 @@ static int phantom_create_rx_ctx ( struct phantom_nic *phantom ) { phantom, phantom->sds_irq_mask_crb ); out: - free_dma ( buf, sizeof ( *buf ) ); + free_phys ( buf, sizeof ( *buf ) ); return rc; } @@ -765,7 +765,7 @@ static int phantom_create_tx_ctx ( struct phantom_nic *phantom ) { int rc; /* Allocate context creation buffer */ - buf = malloc_dma ( sizeof ( *buf ), UNM_DMA_BUFFER_ALIGN ); + buf = malloc_phys ( sizeof ( *buf ), UNM_DMA_BUFFER_ALIGN ); if ( ! buf ) { rc = -ENOMEM; goto out; @@ -821,7 +821,7 @@ static int phantom_create_tx_ctx ( struct phantom_nic *phantom ) { phantom, phantom->cds_producer_crb ); out: - free_dma ( buf, sizeof ( *buf ) ); + free_phys ( buf, sizeof ( *buf ) ); return rc; } @@ -1164,8 +1164,8 @@ static int phantom_open ( struct net_device *netdev ) { int rc; /* Allocate and zero descriptor rings */ - phantom->desc = malloc_dma ( sizeof ( *(phantom->desc) ), - UNM_DMA_BUFFER_ALIGN ); + phantom->desc = malloc_phys ( sizeof ( *(phantom->desc) ), + UNM_DMA_BUFFER_ALIGN ); if ( ! phantom->desc ) { rc = -ENOMEM; goto err_alloc_desc; @@ -1208,7 +1208,7 @@ static int phantom_open ( struct net_device *netdev ) { err_create_tx_ctx: phantom_destroy_rx_ctx ( phantom ); err_create_rx_ctx: - free_dma ( phantom->desc, sizeof ( *(phantom->desc) ) ); + free_phys ( phantom->desc, sizeof ( *(phantom->desc) ) ); phantom->desc = NULL; err_alloc_desc: return rc; @@ -1229,7 +1229,7 @@ static void phantom_close ( struct net_device *netdev ) { phantom_del_macaddr ( phantom, netdev->ll_broadcast ); phantom_destroy_tx_ctx ( phantom ); phantom_destroy_rx_ctx ( phantom ); - free_dma ( phantom->desc, sizeof ( *(phantom->desc) ) ); + free_phys ( phantom->desc, sizeof ( *(phantom->desc) ) ); phantom->desc = NULL; /* Flush any uncompleted descriptors */ @@ -1837,7 +1837,7 @@ static int phantom_map_crb ( struct phantom_nic *phantom, return -EINVAL; } - phantom->bar0 = ioremap ( bar0_start, bar0_size ); + phantom->bar0 = pci_ioremap ( pci, bar0_start, bar0_size ); if ( ! phantom->bar0 ) { DBGC ( phantom, "Phantom %p could not map BAR0\n", phantom ); return -EIO; diff --git a/src/drivers/net/prism2_pci.c b/src/drivers/net/prism2_pci.c index 69ddf0fb0..2feb69522 100644 --- a/src/drivers/net/prism2_pci.c +++ b/src/drivers/net/prism2_pci.c @@ -36,7 +36,7 @@ static int prism2_pci_probe ( struct nic *nic, struct pci_device *pci ) { hfa384x_t *hw = &hw_global; printf ( "Prism2.5 has registers at %#lx\n", pci->membase ); - hw->membase = ioremap ( pci->membase, 0x100 ); + hw->membase = pci_ioremap ( pci, pci->membase, 0x100 ); nic->ioaddr = pci->membase; nic->irqno = 0; diff --git a/src/drivers/net/rdc.c b/src/drivers/net/rdc.c new file mode 100644 index 000000000..c3239c002 --- /dev/null +++ b/src/drivers/net/rdc.c @@ -0,0 +1,694 @@ +/* + * Copyright (C) 2021 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
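The pcnet32 and phantom hunks above are part of a tree-wide rename of malloc_dma()/free_dma() to malloc_phys()/free_phys(): the allocator still returns physically contiguous, alignment-constrained memory, and the caller still passes the original length back when freeing. A hedged sketch of the calling pattern, assuming the declarations live in <ipxe/malloc.h> as before; the sizes are placeholders:

#include <errno.h>
#include <string.h>
#include <ipxe/malloc.h>        /* assumed home of malloc_phys()/free_phys() */

#define DEMO_RING_BYTES 4096    /* placeholder */
#define DEMO_RING_ALIGN 256     /* placeholder */

static void *demo_ring;

static int demo_ring_alloc ( void ) {
        demo_ring = malloc_phys ( DEMO_RING_BYTES, DEMO_RING_ALIGN );
        if ( ! demo_ring )
                return -ENOMEM;
        memset ( demo_ring, 0, DEMO_RING_BYTES );
        return 0;
}

static void demo_ring_free ( void ) {
        free_phys ( demo_ring, DEMO_RING_BYTES ); /* length must match */
        demo_ring = NULL;
}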
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "rdc.h" + +/** @file + * + * RDC R6040 network driver + * + */ + +/****************************************************************************** + * + * Device reset + * + ****************************************************************************** + */ + +/** + * Reset hardware + * + * @v rdc RDC device + * @ret rc Return status code + */ +static int rdc_reset ( struct rdc_nic *rdc ) { + unsigned int i; + + /* Reset NIC */ + writew ( RDC_MCR1_RST, rdc->regs + RDC_MCR1 ); + + /* Wait for reset to complete */ + for ( i = 0 ; i < RDC_RESET_MAX_WAIT_MS ; i++ ) { + + /* Check for reset completion */ + if ( readw ( rdc->regs + RDC_MCR1 ) & RDC_MCR1_RST ) { + mdelay ( 1 ); + continue; + } + + /* Reset internal state machine */ + writew ( RDC_MACSM_RST, rdc->regs + RDC_MACSM ); + writew ( 0, rdc->regs + RDC_MACSM ); + mdelay ( RDC_MACSM_RESET_DELAY_MS ); + + return 0; + } + + DBGC ( rdc, "RDC %p timed out waiting for reset\n", rdc ); + return -ETIMEDOUT; +} + +/****************************************************************************** + * + * MII interface + * + ****************************************************************************** + */ + +/** + * Read from MII register + * + * @v mdio MII interface + * @v phy PHY address + * @v reg Register address + * @ret value Data read, or negative error + */ +static int rdc_mii_read ( struct mii_interface *mdio, unsigned int phy, + unsigned int reg ) { + struct rdc_nic *rdc = container_of ( mdio, struct rdc_nic, mdio ); + uint16_t mmdio; + unsigned int i; + + /* Initiate read */ + mmdio = ( RDC_MMDIO_MIIRD | RDC_MMDIO_PHYAD ( phy ) | + RDC_MMDIO_REGAD ( reg ) ); + writew ( mmdio, rdc->regs + RDC_MMDIO ); + + /* Wait for read to complete */ + for ( i = 0 ; i < RDC_MII_MAX_WAIT_US ; i++ ) { + + /* Check for read completion */ + if ( readw ( rdc->regs + RDC_MMDIO ) & RDC_MMDIO_MIIRD ) { + udelay ( 1 ); + continue; + } + + /* Return register value */ + return ( readw ( rdc->regs + RDC_MMRD ) ); + } + + DBGC ( rdc, "RDC %p timed out waiting for MII read\n", rdc ); + return -ETIMEDOUT; +} + +/** + * Write to MII register + * + * @v mdio MII interface + * @v phy PHY address + * @v reg Register address + * @v data Data to write + * @ret rc Return status code + */ +static int rdc_mii_write ( struct mii_interface *mdio, unsigned int phy, + unsigned int reg, unsigned int data ) { + struct rdc_nic *rdc = container_of ( mdio, struct rdc_nic, mdio ); + uint16_t mmdio; + unsigned int i; + + /* Initiate write */ + mmdio = ( RDC_MMDIO_MIIWR | RDC_MMDIO_PHYAD ( phy ) | + RDC_MMDIO_REGAD ( reg ) ); + writew ( data, rdc->regs + RDC_MMWD ); + writew ( mmdio, rdc->regs + RDC_MMDIO ); + + /* Wait for write to complete */ + for ( i = 0 ; i < RDC_MII_MAX_WAIT_US ; i++ ) { + + /* Check for write completion */ + if ( readw ( rdc->regs + RDC_MMDIO ) & RDC_MMDIO_MIIWR ) { + udelay ( 1 ); + continue; + } + + return 0; + } + + DBGC ( rdc, "RDC %p timed out waiting for MII 
write\n", rdc ); + return -ETIMEDOUT; +} + +/** RDC MII operations */ +static struct mii_operations rdc_mii_operations = { + .read = rdc_mii_read, + .write = rdc_mii_write, +}; + +/****************************************************************************** + * + * Link state + * + ****************************************************************************** + */ + +/** + * Initialise PHY + * + * @v rdc RDC device + * @ret rc Return status code + */ +static int rdc_init_phy ( struct rdc_nic *rdc ) { + int rc; + + /* Find PHY address */ + if ( ( rc = mii_find ( &rdc->mii ) ) != 0 ) { + DBGC ( rdc, "RDC %p could not find PHY address: %s\n", + rdc, strerror ( rc ) ); + return rc; + } + + /* Reset PHY */ + if ( ( rc = mii_reset ( &rdc->mii ) ) != 0 ) { + DBGC ( rdc, "RDC %p could not reset PHY: %s\n", + rdc, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Check link state + * + * @v netdev Network device + * @ret rc Return status code + */ +static int rdc_check_link ( struct net_device *netdev ) { + struct rdc_nic *rdc = netdev->priv; + int rc; + + /* Check link state */ + if ( ( rc = mii_check_link ( &rdc->mii, netdev ) ) != 0 ) { + DBGC ( rdc, "RDC %p could not check link: %s\n", + rdc, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/****************************************************************************** + * + * Network device interface + * + ****************************************************************************** + */ + +/** + * Create descriptor ring + * + * @v rdc RDC device + * @v ring Descriptor ring + * @ret rc Return status code + */ +static int rdc_create_ring ( struct rdc_nic *rdc, struct rdc_ring *ring ) { + size_t len = ( ring->count * sizeof ( ring->desc[0] ) ); + struct rdc_descriptor *desc; + struct rdc_descriptor *next; + physaddr_t start; + unsigned int i; + + /* Allocate descriptor ring */ + ring->desc = dma_alloc ( rdc->dma, &ring->map, len, len ); + if ( ! ring->desc ) + return -ENOMEM; + + /* Initialise descriptor ring */ + memset ( ring->desc, 0, len ); + for ( i = 0 ; i < ring->count ; i++ ) { + desc = &ring->desc[i]; + next = &ring->desc[ ( i + 1 ) & ( ring->count - 1 ) ]; + desc->next = cpu_to_le32 ( dma ( &ring->map, next ) ); + } + + /* Program ring address */ + start = dma ( &ring->map, ring->desc ); + writew ( ( start >> 0 ), ( rdc->regs + ring->reg + RDC_MxDSA_LO ) ); + writew ( ( start >> 16 ), ( rdc->regs + ring->reg + RDC_MxDSA_HI ) ); + + DBGC ( rdc, "RDC %p ring %#02x is at [%08lx,%08lx)\n", + rdc, ring->reg, virt_to_phys ( ring->desc ), + ( virt_to_phys ( ring->desc ) + len ) ); + return 0; +} + +/** + * Destroy descriptor ring + * + * @v rdc RDC device + * @v ring Descriptor ring + */ +static void rdc_destroy_ring ( struct rdc_nic *rdc, struct rdc_ring *ring ) { + size_t len = ( ring->count * sizeof ( ring->desc[0] ) ); + + /* Clear ring address */ + writew ( 0, ( rdc->regs + ring->reg + RDC_MxDSA_LO ) ); + writew ( 0, ( rdc->regs + ring->reg + RDC_MxDSA_HI ) ); + + /* Free descriptors */ + dma_free ( &ring->map, ring->desc, len ); + ring->desc = NULL; + + /* Reset ring */ + ring->prod = 0; + ring->cons = 0; +} + +/** + * Refill receive descriptor ring + * + * @v rdc RDC device + */ +static void rdc_refill_rx ( struct rdc_nic *rdc ) { + struct rdc_descriptor *rx; + struct io_buffer *iobuf; + unsigned int rx_idx; + + /* Refill ring */ + while ( ( rdc->rx.prod - rdc->rx.cons ) < RDC_NUM_RX_DESC ) { + + /* Allocate I/O buffer */ + iobuf = alloc_rx_iob ( RDC_RX_MAX_LEN, rdc->dma ); + if ( ! 
iobuf ) { + /* Wait for next refill */ + break; + } + + /* Get next receive descriptor */ + rx_idx = ( rdc->rx.prod++ % RDC_NUM_RX_DESC ); + rx = &rdc->rx.desc[rx_idx]; + + /* Populate receive descriptor */ + rx->len = cpu_to_le16 ( RDC_RX_MAX_LEN ); + rx->addr = cpu_to_le32 ( iob_dma ( iobuf ) ); + wmb(); + rx->flags = cpu_to_le16 ( RDC_FL_OWNED ); + + /* Record I/O buffer */ + assert ( rdc->rx_iobuf[rx_idx] == NULL ); + rdc->rx_iobuf[rx_idx] = iobuf; + + DBGC2 ( rdc, "RDC %p RX %d is [%lx,%lx)\n", + rdc, rx_idx, virt_to_phys ( iobuf->data ), + ( virt_to_phys ( iobuf->data ) + RDC_RX_MAX_LEN ) ); + } +} + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int rdc_open ( struct net_device *netdev ) { + struct rdc_nic *rdc = netdev->priv; + int rc; + + /* Create transmit descriptor ring */ + if ( ( rc = rdc_create_ring ( rdc, &rdc->tx ) ) != 0 ) + goto err_create_tx; + + /* Create receive descriptor ring */ + if ( ( rc = rdc_create_ring ( rdc, &rdc->rx ) ) != 0 ) + goto err_create_rx; + + /* Program receive buffer length */ + writew ( RDC_RX_MAX_LEN, rdc->regs + RDC_MRBSR ); + + /* Enable transmit and receive */ + writew ( ( RDC_MCR0_FD | RDC_MCR0_TXEN | RDC_MCR0_PROMISC | + RDC_MCR0_RXEN ), + rdc->regs + RDC_MCR0 ); + + /* Enable PHY status polling */ + writew ( ( RDC_MPSCCR_EN | RDC_MPSCCR_PHYAD ( rdc->mii.address ) | + RDC_MPSCCR_SLOW ), + rdc->regs + RDC_MPSCCR ); + + /* Fill receive ring */ + rdc_refill_rx ( rdc ); + + /* Update link state */ + rdc_check_link ( netdev ); + + return 0; + + rdc_destroy_ring ( rdc, &rdc->rx ); + err_create_rx: + rdc_destroy_ring ( rdc, &rdc->tx ); + err_create_tx: + return rc; +} + +/** + * Close network device + * + * @v netdev Network device + */ +static void rdc_close ( struct net_device *netdev ) { + struct rdc_nic *rdc = netdev->priv; + unsigned int i; + + /* Disable NIC */ + writew ( 0, rdc->regs + RDC_MCR0 ); + + /* Destroy receive descriptor ring */ + rdc_destroy_ring ( rdc, &rdc->rx ); + + /* Discard any unused receive buffers */ + for ( i = 0 ; i < RDC_NUM_RX_DESC ; i++ ) { + if ( rdc->rx_iobuf[i] ) + free_rx_iob ( rdc->rx_iobuf[i] ); + rdc->rx_iobuf[i] = NULL; + } + + /* Destroy transmit descriptor ring */ + rdc_destroy_ring ( rdc, &rdc->tx ); +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int rdc_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) { + struct rdc_nic *rdc = netdev->priv; + struct rdc_descriptor *tx; + unsigned int tx_idx; + int rc; + + /* Get next transmit descriptor */ + if ( ( rdc->tx.prod - rdc->tx.cons ) >= RDC_NUM_TX_DESC ) { + DBGC ( rdc, "RDC %p out of transmit descriptors\n", rdc ); + return -ENOBUFS; + } + tx_idx = ( rdc->tx.prod % RDC_NUM_TX_DESC ); + tx = &rdc->tx.desc[tx_idx]; + + /* Pad to minimum length */ + iob_pad ( iobuf, ETH_ZLEN ); + + /* Map I/O buffer */ + if ( ( rc = iob_map_tx ( iobuf, rdc->dma ) ) != 0 ) + return rc; + + /* Update producer index */ + rdc->tx.prod++; + + /* Populate transmit descriptor */ + tx->len = cpu_to_le16 ( iob_len ( iobuf ) ); + tx->addr = cpu_to_le32 ( iob_dma ( iobuf ) ); + wmb(); + tx->flags = cpu_to_le16 ( RDC_FL_OWNED ); + wmb(); + + /* Notify card that there are packets ready to transmit */ + writew ( RDC_MTPR_TM2TX, rdc->regs + RDC_MTPR ); + + return 0; +} + +/** + * Poll for completed packets + * + * @v netdev Network device + */ +static void rdc_poll_tx ( struct net_device *netdev ) { + struct rdc_nic *rdc = netdev->priv; + 
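Both rdc_refill_rx() and rdc_transmit() above hand a descriptor to the NIC in two steps: the length and address are written first, and only then is RDC_FL_OWNED set behind a write barrier, so the hardware can never observe a half-initialised descriptor. The same handover in isolation (a sketch assuming rdc.h and the usual iPXE byte-swap and barrier headers are in scope):

/* Publish a descriptor to the NIC: contents first, ownership last */
static void demo_give_to_nic ( struct rdc_descriptor *desc, uint32_t addr,
                               uint16_t len ) {
        desc->len = cpu_to_le16 ( len );
        desc->addr = cpu_to_le32 ( addr );
        wmb();                          /* order contents before flag */
        desc->flags = cpu_to_le16 ( RDC_FL_OWNED );
}

The completion paths reverse the test: a descriptor whose RDC_FL_OWNED flag has been cleared by the NIC is safe for the driver to reclaim.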
struct rdc_descriptor *tx; + unsigned int tx_idx; + + /* Check for completed packets */ + while ( rdc->tx.cons != rdc->tx.prod ) { + + /* Get next transmit descriptor */ + tx_idx = ( rdc->tx.cons % RDC_NUM_TX_DESC ); + tx = &rdc->tx.desc[tx_idx]; + + /* Stop if descriptor is still in use */ + if ( tx->flags & cpu_to_le16 ( RDC_FL_OWNED ) ) + return; + DBGC2 ( rdc, "RDC %p TX %d complete\n", rdc, tx_idx ); + + /* Complete transmit descriptor */ + rdc->tx.cons++; + netdev_tx_complete_next ( netdev ); + } +} + +/** + * Poll for received packets + * + * @v netdev Network device + */ +static void rdc_poll_rx ( struct net_device *netdev ) { + struct rdc_nic *rdc = netdev->priv; + struct rdc_descriptor *rx; + struct io_buffer *iobuf; + unsigned int rx_idx; + size_t len; + + /* Check for received packets */ + while ( rdc->rx.cons != rdc->rx.prod ) { + + /* Get next receive descriptor */ + rx_idx = ( rdc->rx.cons % RDC_NUM_RX_DESC ); + rx = &rdc->rx.desc[rx_idx]; + + /* Stop if descriptor is still in use */ + if ( rx->flags & cpu_to_le16 ( RDC_FL_OWNED ) ) + return; + + /* Populate I/O buffer */ + iobuf = rdc->rx_iobuf[rx_idx]; + rdc->rx_iobuf[rx_idx] = NULL; + len = le16_to_cpu ( rx->len ); + iob_put ( iobuf, len ); + iob_unput ( iobuf, 4 /* strip CRC */ ); + + /* Hand off to network stack */ + if ( rx->flags & cpu_to_le16 ( RDC_FL_OK ) ) { + DBGC2 ( rdc, "RDC %p RX %d complete (length %zd)\n", + rdc, rx_idx, len ); + netdev_rx ( netdev, iobuf ); + } else { + DBGC2 ( rdc, "RDC %p RX %d error (length %zd, " + "flags %#04x)\n", rdc, rx_idx, len, + le16_to_cpu ( rx->flags ) ); + netdev_rx_err ( netdev, iobuf, -EIO ); + } + rdc->rx.cons++; + } +} + +/** + * Poll for completed and received packets + * + * @v netdev Network device + */ +static void rdc_poll ( struct net_device *netdev ) { + struct rdc_nic *rdc = netdev->priv; + uint16_t misr; + + /* Check for (and acknowledge) interrupts */ + misr = readw ( rdc->regs + RDC_MISR ); + + /* Poll for TX completions, if applicable */ + if ( misr & RDC_MIRQ_TX ) + rdc_poll_tx ( netdev ); + + /* Poll for RX completions, if applicable */ + if ( misr & RDC_MIRQ_RX ) + rdc_poll_rx ( netdev ); + + /* Check link state, if applicable */ + if ( misr & RDC_MIRQ_LINK ) + rdc_check_link ( netdev ); + + /* Check for unexpected interrupts */ + if ( misr & ~( RDC_MIRQ_LINK | RDC_MIRQ_TX | RDC_MIRQ_RX_EARLY | + RDC_MIRQ_RX_EMPTY | RDC_MIRQ_RX ) ) { + DBGC ( rdc, "RDC %p unexpected MISR %#04x\n", rdc, misr ); + /* Report as a TX error */ + netdev_tx_err ( netdev, NULL, -ENOTSUP ); + } + + /* Refill receive ring */ + rdc_refill_rx ( rdc ); +} + +/** + * Enable or disable interrupts + * + * @v netdev Network device + * @v enable Interrupts should be enabled + */ +static void rdc_irq ( struct net_device *netdev, int enable ) { + struct rdc_nic *rdc = netdev->priv; + uint16_t mier; + + /* Enable/disable interrupts */ + mier = ( enable ? 
( RDC_MIRQ_LINK | RDC_MIRQ_TX | RDC_MIRQ_RX ) : 0 ); + writew ( mier, rdc->regs + RDC_MIER ); +} + +/** RDC network device operations */ +static struct net_device_operations rdc_operations = { + .open = rdc_open, + .close = rdc_close, + .transmit = rdc_transmit, + .poll = rdc_poll, + .irq = rdc_irq, +}; + +/****************************************************************************** + * + * PCI interface + * + ****************************************************************************** + */ + +/** + * Probe PCI device + * + * @v pci PCI device + * @ret rc Return status code + */ +static int rdc_probe ( struct pci_device *pci ) { + struct net_device *netdev; + struct rdc_nic *rdc; + union rdc_mac mac; + int rc; + + /* Allocate and initialise net device */ + netdev = alloc_etherdev ( sizeof ( *rdc ) ); + if ( ! netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &rdc_operations ); + rdc = netdev->priv; + pci_set_drvdata ( pci, netdev ); + netdev->dev = &pci->dev; + memset ( rdc, 0, sizeof ( *rdc ) ); + rdc->dma = &pci->dma; + mdio_init ( &rdc->mdio, &rdc_mii_operations ); + mii_init ( &rdc->mii, &rdc->mdio, 0 ); + rdc_init_ring ( &rdc->tx, RDC_NUM_TX_DESC, RDC_MTDSA ); + rdc_init_ring ( &rdc->rx, RDC_NUM_RX_DESC, RDC_MRDSA ); + + /* Fix up PCI device */ + adjust_pci_device ( pci ); + + /* Map registers */ + rdc->regs = pci_ioremap ( pci, pci->membase, RDC_BAR_SIZE ); + if ( ! rdc->regs ) { + rc = -ENODEV; + goto err_ioremap; + } + + /* Fetch MAC address */ + mac.mid[0] = cpu_to_le16 ( readw ( rdc->regs + RDC_MID0 ) ); + mac.mid[1] = cpu_to_le16 ( readw ( rdc->regs + RDC_MID1 ) ); + mac.mid[2] = cpu_to_le16 ( readw ( rdc->regs + RDC_MID2 ) ); + memcpy ( netdev->hw_addr, mac.raw, ETH_ALEN ); + + /* Reset the NIC */ + if ( ( rc = rdc_reset ( rdc ) ) != 0 ) + goto err_reset; + + /* Initialise PHY */ + if ( ( rc = rdc_init_phy ( rdc ) ) != 0 ) + goto err_init_phy; + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register_netdev; + + /* Set initial link state */ + rdc_check_link ( netdev ); + + return 0; + + unregister_netdev ( netdev ); + err_register_netdev: + err_init_phy: + rdc_reset ( rdc ); + err_reset: + iounmap ( rdc->regs ); + err_ioremap: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return rc; +} + +/** + * Remove PCI device + * + * @v pci PCI device + */ +static void rdc_remove ( struct pci_device *pci ) { + struct net_device *netdev = pci_get_drvdata ( pci ); + struct rdc_nic *rdc = netdev->priv; + + /* Unregister network device */ + unregister_netdev ( netdev ); + + /* Reset card */ + rdc_reset ( rdc ); + + /* Free network device */ + iounmap ( rdc->regs ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** RDC PCI device IDs */ +static struct pci_device_id rdc_nics[] = { + PCI_ROM ( 0x17f3, 0x6040, "r6040", "RDC R6040", 0 ), +}; + +/** RDC PCI driver */ +struct pci_driver rdc_driver __pci_driver = { + .ids = rdc_nics, + .id_count = ( sizeof ( rdc_nics ) / sizeof ( rdc_nics[0] ) ), + .probe = rdc_probe, + .remove = rdc_remove, +}; diff --git a/src/drivers/net/rdc.h b/src/drivers/net/rdc.h new file mode 100644 index 000000000..ee1671fdd --- /dev/null +++ b/src/drivers/net/rdc.h @@ -0,0 +1,194 @@ +#ifndef _RDC_H +#define _RDC_H + +/** @file + * + * RDC R6040 network driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** RDC BAR size */ +#define RDC_BAR_SIZE 256 + +/** An RDC descriptor */ +struct rdc_descriptor { + /** Flags */ + uint16_t 
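rdc_probe() above recovers the station address by reading the three 16-bit MID registers into the raw-byte view of union rdc_mac (declared further down in rdc.h). A worked standalone version of the same trick; the register values are made up, and the byte-order assertion assumes a little-endian host, which is why the driver itself wraps the reads in cpu_to_le16() to stay endian-safe:

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define DEMO_ETH_ALEN 6

union demo_mac {
        uint8_t raw[DEMO_ETH_ALEN];
        uint16_t mid[ DEMO_ETH_ALEN / 2 ];
};

int main ( void ) {
        union demo_mac mac;
        uint8_t expected[DEMO_ETH_ALEN] =
                { 0x00, 0x13, 0xd2, 0xaa, 0xbb, 0xcc };

        /* Pretend MID0..MID2 register reads (little-endian word order) */
        mac.mid[0] = ( 0x00 | ( 0x13 << 8 ) );
        mac.mid[1] = ( 0xd2 | ( 0xaa << 8 ) );
        mac.mid[2] = ( 0xbb | ( 0xcc << 8 ) );

        assert ( memcmp ( mac.raw, expected, DEMO_ETH_ALEN ) == 0 );
        return 0;
}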
flags; + /** Length */ + uint16_t len; + /** Address */ + uint32_t addr; + /** Next descriptor */ + uint32_t next; + /** Reserved */ + uint32_t reserved; +} __attribute__ (( packed )); + +/** Descriptor is owned by NIC */ +#define RDC_FL_OWNED 0x8000 + +/** Packet OK */ +#define RDC_FL_OK 0x4000 + +/** MAC control register 0 */ +#define RDC_MCR0 0x00 +#define RDC_MCR0_FD 0x8000 /**< Full duplex */ +#define RDC_MCR0_TXEN 0x1000 /**< Transmit enable */ +#define RDC_MCR0_PROMISC 0x0020 /**< Promiscuous mode */ +#define RDC_MCR0_RXEN 0x0002 /**< Receive enable */ + +/** MAC control register 1 */ +#define RDC_MCR1 0x04 +#define RDC_MCR1_RST 0x0001 /**< MAC reset */ + +/** Maximum time to wait for reset */ +#define RDC_RESET_MAX_WAIT_MS 10 + +/** MAC transmit poll command register */ +#define RDC_MTPR 0x14 +#define RDC_MTPR_TM2TX 0x0001 /**< Trigger MAC to transmit */ + +/** MAC receive buffer size register */ +#define RDC_MRBSR 0x18 + +/** MAC MDIO control register */ +#define RDC_MMDIO 0x20 +#define RDC_MMDIO_MIIWR 0x4000 /**< MDIO write */ +#define RDC_MMDIO_MIIRD 0x2000 /**< MDIO read */ +#define RDC_MMDIO_PHYAD(x) ( (x) << 8 ) /**< PHY address */ +#define RDC_MMDIO_REGAD(x) ( (x) << 0 ) /**< Register address */ + +/** Maximum time to wait for an MII read or write */ +#define RDC_MII_MAX_WAIT_US 2048 + +/** MAC MDIO read data register */ +#define RDC_MMRD 0x24 + +/** MAC MDIO write data register */ +#define RDC_MMWD 0x28 + +/** MAC transmit descriptor start address */ +#define RDC_MTDSA 0x2c + +/** MAC receive descriptor start address */ +#define RDC_MRDSA 0x34 + +/** MAC descriptor start address low half */ +#define RDC_MxDSA_LO 0x0 + +/** MAC descriptor start address low half */ +#define RDC_MxDSA_HI 0x4 + +/** MAC interrupt status register */ +#define RDC_MISR 0x3c +#define RDC_MIRQ_LINK 0x0200 /**< Link status changed */ +#define RDC_MIRQ_TX 0x0010 /**< Transmit complete */ +#define RDC_MIRQ_RX_EARLY 0x0008 /**< Receive early interrupt */ +#define RDC_MIRQ_RX_EMPTY 0x0002 /**< Receive descriptor unavailable */ +#define RDC_MIRQ_RX 0x0001 /**< Receive complete */ + +/** MAC interrupt enable register */ +#define RDC_MIER 0x40 + +/** MAC address word 0 */ +#define RDC_MID0 0x68 + +/** MAC address word 1 */ +#define RDC_MID1 0x6a + +/** MAC address word 2 */ +#define RDC_MID2 0x6c + +/** MAC PHY status change configuration register */ +#define RDC_MPSCCR 0x88 +#define RDC_MPSCCR_EN 0x8000 /**< PHY status change enable */ +#define RDC_MPSCCR_PHYAD(x) ( (x) << 8 ) /**< PHY address */ +#define RDC_MPSCCR_SLOW 0x0007 /**< Poll slowly */ + +/** MAC state machine register */ +#define RDC_MACSM 0xac +#define RDC_MACSM_RST 0x0002 /**< Reset state machine */ + +/** Time to wait after resetting MAC state machine */ +#define RDC_MACSM_RESET_DELAY_MS 10 + +/** A MAC address */ +union rdc_mac { + /** Raw bytes */ + uint8_t raw[ETH_ALEN]; + /** MIDx registers */ + uint16_t mid[ ETH_ALEN / 2 ]; +}; + +/** A descriptor ring */ +struct rdc_ring { + /** Descriptors */ + struct rdc_descriptor *desc; + /** Descriptor ring DMA mapping */ + struct dma_mapping map; + /** Producer index */ + unsigned int prod; + /** Consumer index */ + unsigned int cons; + + /** Number of descriptors */ + unsigned int count; + /** Start address register 0 */ + unsigned int reg; +}; + +/** + * Initialise descriptor ring + * + * @v ring Descriptor ring + * @v count Number of descriptors + * @v reg Start address register 0 + */ +static inline __attribute__ (( always_inline )) void +rdc_init_ring ( struct rdc_ring *ring, unsigned int 
count, unsigned int reg ) { + + ring->count = count; + ring->reg = reg; +} + +/** Number of transmit descriptors + * + * This is a policy decision. + */ +#define RDC_NUM_TX_DESC 16 + +/** Number of receive descriptors + * + * This is a policy decision. + */ +#define RDC_NUM_RX_DESC 8 + +/** Receive buffer length */ +#define RDC_RX_MAX_LEN ( ETH_FRAME_LEN + 4 /* VLAN */ + 4 /* CRC */ ) + +/** An RDC network card */ +struct rdc_nic { + /** Registers */ + void *regs; + /** DMA device */ + struct dma_device *dma; + /** MII interface */ + struct mii_interface mdio; + /** MII device */ + struct mii_device mii; + + /** Transmit descriptor ring */ + struct rdc_ring tx; + /** Receive descriptor ring */ + struct rdc_ring rx; + /** Receive I/O buffers */ + struct io_buffer *rx_iobuf[RDC_NUM_RX_DESC]; +}; + +#endif /* _RDC_H */ diff --git a/src/drivers/net/realtek.c b/src/drivers/net/realtek.c index 310b9f96a..a43efb68b 100644 --- a/src/drivers/net/realtek.c +++ b/src/drivers/net/realtek.c @@ -36,6 +36,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include #include #include #include @@ -419,6 +420,16 @@ static int realtek_phy_reset ( struct realtek_nic *rtl ) { */ } + /* Some cards (e.g. RTL8211B) have a hardware errata that + * requires the MII_MMD_DATA register to be cleared before the + * link will come up. + */ + if ( ( rc = mii_write ( &rtl->mii, MII_MMD_DATA, 0 ) ) != 0 ) { + /* Ignore failures, since the register may not be + * present on all PHYs. + */ + } + /* Restart autonegotiation */ if ( ( rc = mii_restart ( &rtl->mii ) ) != 0 ) { DBGC ( rtl, "REALTEK %p could not restart MII: %s\n", @@ -505,44 +516,27 @@ static void realtek_check_link ( struct net_device *netdev ) { * @ret rc Return status code */ static int realtek_create_buffer ( struct realtek_nic *rtl ) { + struct realtek_rx_buffer *rxbuf = &rtl->rxbuf; size_t len = ( RTL_RXBUF_LEN + RTL_RXBUF_PAD ); - physaddr_t address; - int rc; /* Do nothing unless in legacy mode */ if ( ! rtl->legacy ) return 0; /* Allocate buffer */ - rtl->rx_buffer = malloc_dma ( len, RTL_RXBUF_ALIGN ); - if ( ! rtl->rx_buffer ) { - rc = -ENOMEM; - goto err_alloc; - } - address = virt_to_bus ( rtl->rx_buffer ); - - /* Check that card can support address */ - if ( address & ~0xffffffffULL ) { - DBGC ( rtl, "REALTEK %p cannot support 64-bit RX buffer " - "address\n", rtl ); - rc = -ENOTSUP; - goto err_64bit; - } + rxbuf->data = dma_alloc ( rtl->dma, &rxbuf->map, len, + RTL_RXBUF_ALIGN ); + if ( ! 
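The realtek conversion above (and the RDC driver before it) switches from malloc_dma() plus virt_to_bus() to the mapping-aware DMA API: dma_alloc() returns a CPU pointer and records a mapping, dma() translates a pointer inside that allocation into the device-side address programmed into the card, and dma_free() releases both. A sketch of the life cycle, assuming the API is declared in <ipxe/dma.h>; lengths and the register offset are placeholders:

#define DEMO_DESC_LEN 1024      /* placeholder */
#define DEMO_DESC_ALIGN 256     /* placeholder */
#define DEMO_REG_RING_BASE 0x20 /* placeholder */

static int demo_ring_setup ( struct dma_device *dma_dev, void *regs,
                             struct dma_mapping *map, void **desc ) {
        *desc = dma_alloc ( dma_dev, map, DEMO_DESC_LEN, DEMO_DESC_ALIGN );
        if ( ! *desc )
                return -ENOMEM;
        memset ( *desc, 0, DEMO_DESC_LEN );

        /* Program the device-side address, not virt_to_bus() */
        writel ( dma ( map, *desc ), ( regs + DEMO_REG_RING_BASE ) );
        return 0;
}

static void demo_ring_teardown ( struct dma_mapping *map, void *desc ) {
        dma_free ( map, desc, DEMO_DESC_LEN );
}

The old explicit "cannot support 64-bit buffer address" checks disappear because addressability is now expressed through the DMA device's mask rather than per-allocation tests.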
rxbuf->data ) + return -ENOMEM; /* Program buffer address */ - writel ( address, rtl->regs + RTL_RBSTART ); - DBGC ( rtl, "REALTEK %p receive buffer is at [%08llx,%08llx,%08llx)\n", - rtl, ( ( unsigned long long ) address ), - ( ( unsigned long long ) address + RTL_RXBUF_LEN ), - ( ( unsigned long long ) address + len ) ); + writel ( dma ( &rxbuf->map, rxbuf->data ), rtl->regs + RTL_RBSTART ); + DBGC ( rtl, "REALTEK %p receive buffer is at [%08lx,%08lx,%08lx)\n", + rtl, virt_to_phys ( rxbuf->data ), + ( virt_to_phys ( rxbuf->data ) + RTL_RXBUF_LEN ), + ( virt_to_phys ( rxbuf->data ) + len ) ); return 0; - - err_64bit: - free_dma ( rtl->rx_buffer, len ); - rtl->rx_buffer = NULL; - err_alloc: - return rc; } /** @@ -551,6 +545,7 @@ static int realtek_create_buffer ( struct realtek_nic *rtl ) { * @v rtl Realtek device */ static void realtek_destroy_buffer ( struct realtek_nic *rtl ) { + struct realtek_rx_buffer *rxbuf = &rtl->rxbuf; size_t len = ( RTL_RXBUF_LEN + RTL_RXBUF_PAD ); /* Do nothing unless in legacy mode */ @@ -561,9 +556,9 @@ static void realtek_destroy_buffer ( struct realtek_nic *rtl ) { writel ( 0, rtl->regs + RTL_RBSTART ); /* Free buffer */ - free_dma ( rtl->rx_buffer, len ); - rtl->rx_buffer = NULL; - rtl->rx_offset = 0; + dma_free ( &rxbuf->map, rxbuf->data, len ); + rxbuf->data = NULL; + rxbuf->offset = 0; } /** @@ -582,7 +577,8 @@ static int realtek_create_ring ( struct realtek_nic *rtl, return 0; /* Allocate descriptor ring */ - ring->desc = malloc_dma ( ring->len, RTL_RING_ALIGN ); + ring->desc = dma_alloc ( rtl->dma, &ring->map, ring->len, + RTL_RING_ALIGN ); if ( ! ring->desc ) return -ENOMEM; @@ -590,13 +586,13 @@ static int realtek_create_ring ( struct realtek_nic *rtl, memset ( ring->desc, 0, ring->len ); /* Program ring address */ - address = virt_to_bus ( ring->desc ); + address = dma ( &ring->map, ring->desc ); writel ( ( ( ( uint64_t ) address ) >> 32 ), rtl->regs + ring->reg + 4 ); writel ( ( address & 0xffffffffUL ), rtl->regs + ring->reg ); - DBGC ( rtl, "REALTEK %p ring %02x is at [%08llx,%08llx)\n", - rtl, ring->reg, ( ( unsigned long long ) address ), - ( ( unsigned long long ) address + ring->len ) ); + DBGC ( rtl, "REALTEK %p ring %02x is at [%08lx,%08lx)\n", + rtl, ring->reg, virt_to_phys ( ring->desc ), + ( virt_to_phys ( ring->desc ) + ring->len ) ); return 0; } @@ -623,7 +619,7 @@ static void realtek_destroy_ring ( struct realtek_nic *rtl, writel ( 0, rtl->regs + ring->reg + 4 ); /* Free descriptor ring */ - free_dma ( ring->desc, ring->len ); + dma_free ( &ring->map, ring->desc, ring->len ); ring->desc = NULL; } @@ -636,7 +632,6 @@ static void realtek_refill_rx ( struct realtek_nic *rtl ) { struct realtek_descriptor *rx; struct io_buffer *iobuf; unsigned int rx_idx; - physaddr_t address; int is_last; /* Do nothing in legacy mode */ @@ -646,7 +641,7 @@ static void realtek_refill_rx ( struct realtek_nic *rtl ) { while ( ( rtl->rx.prod - rtl->rx.cons ) < RTL_NUM_RX_DESC ) { /* Allocate I/O buffer */ - iobuf = alloc_iob ( RTL_RX_MAX_LEN ); + iobuf = alloc_rx_iob ( RTL_RX_MAX_LEN, rtl->dma ); if ( ! 
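Receive buffers follow the same conversion: alloc_rx_iob() hands back an I/O buffer already mapped for the given DMA device, iob_dma() yields the address to place in the descriptor, and free_rx_iob() unmaps and frees it. The refill step reduced to its essentials (the descriptor layout and ownership value are stand-ins, not a real device's format):

struct demo_rx_desc {
        uint64_t address;
        uint16_t length;
        uint16_t flags;
};

static void demo_refill_one ( struct dma_device *dma_dev,
                              struct demo_rx_desc *desc,
                              struct io_buffer **slot, size_t max_len ) {
        struct io_buffer *iobuf;

        iobuf = alloc_rx_iob ( max_len, dma_dev );
        if ( ! iobuf )
                return;                 /* retry on a later poll */
        desc->address = cpu_to_le64 ( iob_dma ( iobuf ) );
        desc->length = cpu_to_le16 ( max_len );
        wmb();
        desc->flags = cpu_to_le16 ( 0x8000 /* OWN bit, placeholder value */ );
        *slot = iobuf;                  /* remember for completion or close */
}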
iobuf ) { /* Wait for next refill */ return; @@ -658,8 +653,7 @@ static void realtek_refill_rx ( struct realtek_nic *rtl ) { rx = &rtl->rx.desc[rx_idx]; /* Populate receive descriptor */ - address = virt_to_bus ( iobuf->data ); - rx->address = cpu_to_le64 ( address ); + rx->address = cpu_to_le64 ( iob_dma ( iobuf ) ); rx->length = cpu_to_le16 ( RTL_RX_MAX_LEN ); wmb(); rx->flags = ( cpu_to_le16 ( RTL_DESC_OWN ) | @@ -670,9 +664,9 @@ static void realtek_refill_rx ( struct realtek_nic *rtl ) { assert ( rtl->rx_iobuf[rx_idx] == NULL ); rtl->rx_iobuf[rx_idx] = iobuf; - DBGC2 ( rtl, "REALTEK %p RX %d is [%llx,%llx)\n", rtl, rx_idx, - ( ( unsigned long long ) address ), - ( ( unsigned long long ) address + RTL_RX_MAX_LEN ) ); + DBGC2 ( rtl, "REALTEK %p RX %d is [%lx,%lx)\n", + rtl, rx_idx, virt_to_phys ( iobuf->data ), + ( virt_to_phys ( iobuf->data ) + RTL_RX_MAX_LEN ) ); } } @@ -762,12 +756,16 @@ static void realtek_close ( struct net_device *netdev ) { /* Discard any unused receive buffers */ for ( i = 0 ; i < RTL_NUM_RX_DESC ; i++ ) { if ( rtl->rx_iobuf[i] ) - free_iob ( rtl->rx_iobuf[i] ); + free_rx_iob ( rtl->rx_iobuf[i] ); rtl->rx_iobuf[i] = NULL; } /* Destroy transmit descriptor ring */ realtek_destroy_ring ( rtl, &rtl->tx ); + + /* Reset legacy transmit descriptor index, if applicable */ + if ( rtl->legacy ) + realtek_reset ( rtl ); } /** @@ -782,42 +780,41 @@ static int realtek_transmit ( struct net_device *netdev, struct realtek_nic *rtl = netdev->priv; struct realtek_descriptor *tx; unsigned int tx_idx; - physaddr_t address; int is_last; + int rc; /* Get next transmit descriptor */ if ( ( rtl->tx.prod - rtl->tx.cons ) >= RTL_NUM_TX_DESC ) { netdev_tx_defer ( netdev, iobuf ); return 0; } - tx_idx = ( rtl->tx.prod++ % RTL_NUM_TX_DESC ); + tx_idx = ( rtl->tx.prod % RTL_NUM_TX_DESC ); + + /* Pad and align packet, if needed */ + if ( rtl->legacy ) + iob_pad ( iobuf, ETH_ZLEN ); + + /* Map I/O buffer */ + if ( ( rc = iob_map_tx ( iobuf, rtl->dma ) ) != 0 ) + return rc; + + /* Update producer index */ + rtl->tx.prod++; /* Transmit packet */ if ( rtl->legacy ) { - /* Pad and align packet */ - iob_pad ( iobuf, ETH_ZLEN ); - address = virt_to_bus ( iobuf->data ); - - /* Check that card can support address */ - if ( address & ~0xffffffffULL ) { - DBGC ( rtl, "REALTEK %p cannot support 64-bit TX " - "buffer address\n", rtl ); - return -ENOTSUP; - } - /* Add to transmit ring */ - writel ( address, rtl->regs + RTL_TSAD ( tx_idx ) ); + writel ( iob_dma ( iobuf ), rtl->regs + RTL_TSAD ( tx_idx ) ); writel ( ( RTL_TSD_ERTXTH_DEFAULT | iob_len ( iobuf ) ), rtl->regs + RTL_TSD ( tx_idx ) ); } else { /* Populate transmit descriptor */ - address = virt_to_bus ( iobuf->data ); is_last = ( tx_idx == ( RTL_NUM_TX_DESC - 1 ) ); tx = &rtl->tx.desc[tx_idx]; - tx->address = cpu_to_le64 ( address ); + tx->address = cpu_to_le64 ( iob_dma ( iobuf ) ); tx->length = cpu_to_le16 ( iob_len ( iobuf ) ); wmb(); tx->flags = ( cpu_to_le16 ( RTL_DESC_OWN | RTL_DESC_FS | @@ -829,10 +826,9 @@ static int realtek_transmit ( struct net_device *netdev, writeb ( RTL_TPPOLL_NPQ, rtl->regs + rtl->tppoll ); } - DBGC2 ( rtl, "REALTEK %p TX %d is [%llx,%llx)\n", rtl, tx_idx, - ( ( unsigned long long ) virt_to_bus ( iobuf->data ) ), - ( ( ( unsigned long long ) virt_to_bus ( iobuf->data ) ) + - iob_len ( iobuf ) ) ); + DBGC2 ( rtl, "REALTEK %p TX %d is [%lx,%lx)\n", + rtl, tx_idx, virt_to_phys ( iobuf->data ), + virt_to_phys ( iobuf->data ) + iob_len ( iobuf ) ); return 0; } @@ -892,12 +888,12 @@ static void realtek_legacy_poll_rx ( 
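On the transmit side the buffer is not pre-mapped, so realtek_transmit() above calls iob_map_tx() before touching the descriptor and only then advances the producer index, keeping the error path trivial. Condensed to the ordering that matters (descriptor type is again a stand-in; iob_map_tx(), iob_dma() and iob_len() are the calls used in the hunks above):

struct demo_tx_desc {
        uint64_t address;
        uint16_t length;
        uint16_t flags;
};

static int demo_tx_one ( struct dma_device *dma_dev, struct io_buffer *iobuf,
                         struct demo_tx_desc *desc ) {
        int rc;

        /* Map for device access before exposing anything to the NIC */
        if ( ( rc = iob_map_tx ( iobuf, dma_dev ) ) != 0 )
                return rc;

        desc->address = cpu_to_le64 ( iob_dma ( iobuf ) );
        desc->length = cpu_to_le16 ( iob_len ( iobuf ) );
        wmb();
        desc->flags = cpu_to_le16 ( 0x8000 /* OWN bit, placeholder value */ );
        return 0;
}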
struct net_device *netdev ) { while ( ! ( readb ( rtl->regs + RTL_CR ) & RTL_CR_BUFE ) ) { /* Extract packet from receive buffer */ - rx = ( rtl->rx_buffer + rtl->rx_offset ); + rx = ( rtl->rxbuf.data + rtl->rxbuf.offset ); len = le16_to_cpu ( rx->length ); if ( rx->status & cpu_to_le16 ( RTL_STAT_ROK ) ) { DBGC2 ( rtl, "REALTEK %p RX offset %x+%zx\n", - rtl, rtl->rx_offset, len ); + rtl, rtl->rxbuf.offset, len ); /* Allocate I/O buffer */ iobuf = alloc_iob ( len ); @@ -917,16 +913,16 @@ static void realtek_legacy_poll_rx ( struct net_device *netdev ) { } else { DBGC ( rtl, "REALTEK %p RX offset %x+%zx error %04x\n", - rtl, rtl->rx_offset, len, + rtl, rtl->rxbuf.offset, len, le16_to_cpu ( rx->status ) ); netdev_rx_err ( netdev, NULL, -EIO ); } /* Update buffer offset */ - rtl->rx_offset = ( rtl->rx_offset + sizeof ( *rx ) + len ); - rtl->rx_offset = ( ( rtl->rx_offset + 3 ) & ~3 ); - rtl->rx_offset = ( rtl->rx_offset % RTL_RXBUF_LEN ); - writew ( ( rtl->rx_offset - 16 ), rtl->regs + RTL_CAPR ); + rtl->rxbuf.offset += ( sizeof ( *rx ) + len ); + rtl->rxbuf.offset = ( ( rtl->rxbuf.offset + 3 ) & ~3 ); + rtl->rxbuf.offset = ( rtl->rxbuf.offset % RTL_RXBUF_LEN ); + writew ( ( rtl->rxbuf.offset - 16 ), rtl->regs + RTL_CAPR ); /* Give chip time to react before rechecking RTL_CR */ readw ( rtl->regs + RTL_CAPR ); @@ -1084,11 +1080,13 @@ static void realtek_detect ( struct realtek_nic *rtl ) { DBGC ( rtl, "REALTEK %p appears to be an RTL8169\n", rtl ); rtl->have_phy_regs = 1; rtl->tppoll = RTL_TPPOLL_8169; + dma_set_mask_64bit ( rtl->dma ); } else { if ( ( check_cpcr == cpcr ) && ( cpcr != 0xffff ) ) { DBGC ( rtl, "REALTEK %p appears to be an RTL8139C+\n", rtl ); rtl->tppoll = RTL_TPPOLL_8139CP; + dma_set_mask_64bit ( rtl->dma ); } else { DBGC ( rtl, "REALTEK %p appears to be an RTL8139\n", rtl ); @@ -1128,12 +1126,15 @@ static int realtek_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map registers */ - rtl->regs = ioremap ( pci->membase, RTL_BAR_SIZE ); + rtl->regs = pci_ioremap ( pci, pci->membase, RTL_BAR_SIZE ); if ( ! 
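The legacy receive path above walks a single ring buffer by offset: after each packet it skips the status/length header plus the payload, rounds up to a 4-byte boundary, wraps modulo the buffer length, and writes the new offset (minus 16, as the hardware expects) to CAPR. A worked standalone version of that offset arithmetic, assuming a 4-byte header and a placeholder buffer size:

#include <assert.h>
#include <stddef.h>

#define DEMO_RXBUF_LEN 65536    /* placeholder ring buffer size */
#define DEMO_RX_HDR_LEN 4       /* assumed status + length header */

/* Advance the read offset past one received packet */
static unsigned int demo_next_offset ( unsigned int offset, size_t len ) {
        offset += ( DEMO_RX_HDR_LEN + len );
        offset = ( ( offset + 3 ) & ~3 );       /* 4-byte alignment */
        return ( offset % DEMO_RXBUF_LEN );     /* wrap around the buffer */
}

int main ( void ) {
        /* 60-byte frame at offset 16: 16 + 4 + 60 = 80, already aligned */
        assert ( demo_next_offset ( 16, 60 ) == 80 );
        /* 61-byte frame: 16 + 4 + 61 = 81, rounds up to 84 */
        assert ( demo_next_offset ( 16, 61 ) == 84 );
        return 0;
}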
rtl->regs ) { rc = -ENODEV; goto err_ioremap; } + /* Configure DMA */ + rtl->dma = &pci->dma; + /* Reset the NIC */ if ( ( rc = realtek_reset ( rtl ) ) != 0 ) goto err_reset; diff --git a/src/drivers/net/realtek.h b/src/drivers/net/realtek.h index 4d13784c4..d4642fd76 100644 --- a/src/drivers/net/realtek.h +++ b/src/drivers/net/realtek.h @@ -247,6 +247,8 @@ enum realtek_legacy_status { struct realtek_ring { /** Descriptors */ struct realtek_descriptor *desc; + /** Descriptor ring DMA mapping */ + struct dma_mapping map; /** Producer index */ unsigned int prod; /** Consumer index */ @@ -272,10 +274,22 @@ realtek_init_ring ( struct realtek_ring *ring, unsigned int count, ring->reg = reg; } +/** Receive buffer (legacy mode *) */ +struct realtek_rx_buffer { + /** Buffer */ + void *data; + /** Buffer DMA mapping */ + struct dma_mapping map; + /** Offset within buffer */ + unsigned int offset; +}; + /** A Realtek network card */ struct realtek_nic { /** Registers */ void *regs; + /** DMA device */ + struct dma_device *dma; /** SPI bit-bashing interface */ struct spi_bit_basher spibit; /** EEPROM */ @@ -301,9 +315,7 @@ struct realtek_nic { /** Receive I/O buffers */ struct io_buffer *rx_iobuf[RTL_NUM_RX_DESC]; /** Receive buffer (legacy mode) */ - void *rx_buffer; - /** Offset within receive buffer (legacy mode) */ - unsigned int rx_offset; + struct realtek_rx_buffer rxbuf; }; #endif /* _REALTEK_H */ diff --git a/src/drivers/net/rhine.c b/src/drivers/net/rhine.c index a1dc58725..f4d3a2580 100644 --- a/src/drivers/net/rhine.c +++ b/src/drivers/net/rhine.c @@ -292,7 +292,7 @@ static int rhine_create_ring ( struct rhine_nic *rhn, unsigned int i; /* Allocate descriptors */ - ring->desc = malloc_dma ( len, RHINE_RING_ALIGN ); + ring->desc = malloc_phys ( len, RHINE_RING_ALIGN ); if ( ! 
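The recurring ioremap() to pci_ioremap() change in realtek, rhine and the drivers that follow threads the PCI device into the mapping call, so that any per-device bus-to-CPU address translation can be applied before the BAR is mapped. The probe-time pattern reduced to a skeleton (the BAR size is a placeholder; pci->membase and iounmap() are used exactly as in the hunks above):

#define DEMO_BAR_SIZE 256       /* placeholder */

static int demo_map_registers ( struct pci_device *pci, void **regs ) {
        *regs = pci_ioremap ( pci, pci->membase, DEMO_BAR_SIZE );
        if ( ! *regs )
                return -ENODEV;
        return 0;
}

static void demo_unmap_registers ( void *regs ) {
        iounmap ( regs );       /* remove/error paths stay unchanged */
}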
ring->desc ) return -ENOMEM; @@ -328,7 +328,7 @@ static void rhine_destroy_ring ( struct rhine_nic *rhn, writel ( 0, rhn->regs + ring->reg ); /* Free descriptor ring */ - free_dma ( ring->desc, len ); + free_phys ( ring->desc, len ); ring->desc = NULL; ring->prod = 0; ring->cons = 0; @@ -700,7 +700,7 @@ static int rhine_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map registers */ - rhn->regs = ioremap ( pci->membase, RHINE_BAR_SIZE ); + rhn->regs = pci_ioremap ( pci, pci->membase, RHINE_BAR_SIZE ); rhn->ioaddr = pci->ioaddr; DBGC ( rhn, "RHINE %p regs at %08lx, I/O at %04lx\n", rhn, pci->membase, pci->ioaddr ); diff --git a/src/drivers/net/rtl818x/rtl818x.c b/src/drivers/net/rtl818x/rtl818x.c index f5082084e..599d36fad 100644 --- a/src/drivers/net/rtl818x/rtl818x.c +++ b/src/drivers/net/rtl818x/rtl818x.c @@ -328,8 +328,8 @@ static int rtl818x_init_rx_ring(struct net80211_device *dev) struct rtl818x_rx_desc *entry; int i; - priv->rx_ring = malloc_dma(sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE, - RTL818X_RING_ALIGN); + priv->rx_ring = malloc_phys(sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE, + RTL818X_RING_ALIGN); priv->rx_ring_dma = virt_to_bus(priv->rx_ring); if (!priv->rx_ring) { DBG("rtl818x %s: cannot allocate RX ring\n", dev->netdev->name); @@ -364,7 +364,7 @@ static void rtl818x_free_rx_ring(struct net80211_device *dev) priv->rx_buf[i] = NULL; } - free_dma(priv->rx_ring, sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE); + free_phys(priv->rx_ring, sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE); priv->rx_ring = NULL; } @@ -373,8 +373,8 @@ static int rtl818x_init_tx_ring(struct net80211_device *dev) struct rtl818x_priv *priv = dev->priv; int i; - priv->tx_ring = malloc_dma(sizeof(*priv->tx_ring) * RTL818X_TX_RING_SIZE, - RTL818X_RING_ALIGN); + priv->tx_ring = malloc_phys(sizeof(*priv->tx_ring) * RTL818X_TX_RING_SIZE, + RTL818X_RING_ALIGN); priv->tx_ring_dma = virt_to_bus(priv->tx_ring); if (!priv->tx_ring) { DBG("rtl818x %s: cannot allocate TX ring\n", dev->netdev->name); @@ -402,7 +402,7 @@ static void rtl818x_free_tx_ring(struct net80211_device *dev) priv->tx_buf[i] = NULL; } - free_dma(priv->tx_ring, sizeof(*priv->tx_ring) * RTL818X_TX_RING_SIZE); + free_phys(priv->tx_ring, sizeof(*priv->tx_ring) * RTL818X_TX_RING_SIZE); priv->tx_ring = NULL; } diff --git a/src/drivers/net/sfc/efx_common.c b/src/drivers/net/sfc/efx_common.c index 403887707..ad572b1da 100644 --- a/src/drivers/net/sfc/efx_common.c +++ b/src/drivers/net/sfc/efx_common.c @@ -2,11 +2,12 @@ * * Driver datapath common code for Solarflare network cards * - * Written by Shradha Shah + * Written by Shradha Shah, maintained by * * Copyright Fen Systems Ltd. 2005 * Copyright Level 5 Networks Inc. 2005 - * Copyright 2006-2017 Solarflare Communications Inc. + * Copyright 2006-2019 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -85,7 +86,7 @@ void efx_probe(struct net_device *netdev, enum efx_revision revision) efx->mmio_start = pci_bar_start(pci, reg); efx->mmio_len = pci_bar_size(pci, reg); - efx->membase = ioremap(efx->mmio_start, efx->mmio_len); + efx->membase = pci_ioremap(pci, efx->mmio_start, efx->mmio_len); DBGCP(efx, "BAR of %lx bytes at phys %lx mapped at %p\n", efx->mmio_len, efx->mmio_start, efx->membase); diff --git a/src/drivers/net/sfc/efx_hunt.c b/src/drivers/net/sfc/efx_hunt.c index 07dd7dfea..0bce3e45a 100644 --- a/src/drivers/net/sfc/efx_hunt.c +++ b/src/drivers/net/sfc/efx_hunt.c @@ -2,9 +2,10 @@ * * Driver datapath for Solarflare network cards * - * Written by Shradha Shah + * Written by Shradha Shah, maintained by * - * Copyright 2012-2017 Solarflare Communications Inc. + * Copyright 2012-2019 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -37,7 +38,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); void efx_hunt_free_special_buffer(void *buf, int bytes) { - free_dma(buf, bytes); + free_phys(buf, bytes); } static void *efx_hunt_alloc_special_buffer(int bytes, @@ -50,7 +51,7 @@ static void *efx_hunt_alloc_special_buffer(int bytes, * buffer will be passed into an MC_CMD_INIT_*Q command to setup the * appropriate type of queue via MCDI. */ - buffer = malloc_dma(bytes, EFX_BUF_ALIGN); + buffer = malloc_phys(bytes, EFX_BUF_ALIGN); if (!buffer) return NULL; diff --git a/src/drivers/net/sfc/efx_hunt.h b/src/drivers/net/sfc/efx_hunt.h index b8377bf20..d6bb4659f 100644 --- a/src/drivers/net/sfc/efx_hunt.h +++ b/src/drivers/net/sfc/efx_hunt.h @@ -2,9 +2,10 @@ * * GPL net driver for Solarflare network cards * - * Written by Shradha Shah + * Written by Shradha Shah, maintained by * - * Copyright 2012-2017 Solarflare Communications Inc. + * Copyright 2012-2019 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/src/drivers/net/sfc/mcdi.h b/src/drivers/net/sfc/mcdi.h index 19c62021a..2d26cded3 100644 --- a/src/drivers/net/sfc/mcdi.h +++ b/src/drivers/net/sfc/mcdi.h @@ -1,9 +1,11 @@ /**************************************************************************** * Driver for Solarflare network controllers and boards * - * Written by Martin Habets + * Written by Martin Habets , maintained + * by * - * Copyright 2012-2017 Solarflare Communications Inc. + * Copyright 2012-2019 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/src/drivers/net/sfc/sfc_hunt.c b/src/drivers/net/sfc/sfc_hunt.c index dd5f7043f..a37670ae2 100644 --- a/src/drivers/net/sfc/sfc_hunt.c +++ b/src/drivers/net/sfc/sfc_hunt.c @@ -2,9 +2,10 @@ * * Device driver for Solarflare Communications EF10 devices * - * Written by Shradha Shah + * Written by Shradha Shah, maintained by * - * Copyright 2012-2017 Solarflare Communications Inc. + * Copyright 2012-2019 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -1314,6 +1315,8 @@ const struct efx_nic_type hunt_nic_type = { }; static struct pci_device_id hunt_nics[] = { + PCI_ROM(0x1924, 0x0903, "SFC9120", "Solarflare SFC9120 Adapter", 0), + PCI_ROM(0x1924, 0x0923, "SFC9140", "Solarflare SFC9140 Adapter", 0), PCI_ROM(0x1924, 0x0a03, "SFC9220", "Solarflare SFN8xxx Adapter", 0), PCI_ROM(0x1924, 0x0b03, "SFC9250", "Solarflare X25xx Adapter", 0), }; diff --git a/src/drivers/net/sis190.c b/src/drivers/net/sis190.c index b92e95f2a..0e4f0762e 100644 --- a/src/drivers/net/sis190.c +++ b/src/drivers/net/sis190.c @@ -552,7 +552,7 @@ static int sis190_open(struct net_device *dev) int rc; /* Allocate TX ring */ - tp->TxDescRing = malloc_dma(TX_RING_BYTES, RING_ALIGNMENT); + tp->TxDescRing = malloc_phys(TX_RING_BYTES, RING_ALIGNMENT); if (!tp->TxDescRing) { DBG("sis190: TX ring allocation failed\n"); rc = -ENOMEM; @@ -561,7 +561,7 @@ static int sis190_open(struct net_device *dev) tp->tx_dma = cpu_to_le32(virt_to_bus(tp->TxDescRing)); /* Allocate RX ring */ - tp->RxDescRing = malloc_dma(RX_RING_BYTES, RING_ALIGNMENT); + tp->RxDescRing = malloc_phys(RX_RING_BYTES, RING_ALIGNMENT); if (!tp->RxDescRing) { DBG("sis190: RX ring allocation failed\n"); rc = -ENOMEM; @@ -600,8 +600,8 @@ static void sis190_free(struct net_device *dev) struct sis190_private *tp = netdev_priv(dev); int i; - free_dma(tp->TxDescRing, TX_RING_BYTES); - free_dma(tp->RxDescRing, RX_RING_BYTES); + free_phys(tp->TxDescRing, TX_RING_BYTES); + free_phys(tp->RxDescRing, RX_RING_BYTES); tp->TxDescRing = NULL; tp->RxDescRing = NULL; @@ -886,7 +886,7 @@ static int sis190_init_board(struct pci_device *pdev, struct net_device **netdev adjust_pci_device(pdev); - ioaddr = ioremap(pdev->membase, SIS190_REGS_SIZE); + ioaddr = pci_ioremap(pdev, pdev->membase, SIS190_REGS_SIZE); if (!ioaddr) { DBG("sis190: cannot remap MMIO, aborting\n"); rc = -EIO; diff --git a/src/drivers/net/skeleton.c b/src/drivers/net/skeleton.c index 0bae3089c..a76c6e3d9 100644 --- a/src/drivers/net/skeleton.c +++ b/src/drivers/net/skeleton.c @@ -195,7 +195,7 @@ static int skeleton_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map registers */ - skel->regs = ioremap ( pci->membase, SKELETON_BAR_SIZE ); + skel->regs = pci_ioremap ( pci, pci->membase, SKELETON_BAR_SIZE ); if ( ! 
skel->regs ) { rc = -ENODEV; goto err_ioremap; diff --git a/src/drivers/net/skge.c b/src/drivers/net/skge.c index c3264225b..5aa5e2a6a 100755 --- a/src/drivers/net/skge.c +++ b/src/drivers/net/skge.c @@ -1699,7 +1699,7 @@ void skge_free(struct net_device *dev) free(skge->tx_ring.start); skge->tx_ring.start = NULL; - free_dma(skge->mem, RING_SIZE); + free_phys(skge->mem, RING_SIZE); skge->mem = NULL; skge->dma = 0; } @@ -1714,7 +1714,7 @@ static int skge_up(struct net_device *dev) DBG2(PFX "%s: enabling interface\n", dev->name); - skge->mem = malloc_dma(RING_SIZE, SKGE_RING_ALIGN); + skge->mem = malloc_phys(RING_SIZE, SKGE_RING_ALIGN); skge->dma = virt_to_bus(skge->mem); if (!skge->mem) return -ENOMEM; @@ -2346,8 +2346,9 @@ static int skge_probe(struct pci_device *pdev) hw->pdev = pdev; - hw->regs = (unsigned long)ioremap(pci_bar_start(pdev, PCI_BASE_ADDRESS_0), - SKGE_REG_SIZE); + hw->regs = (unsigned long)pci_ioremap(pdev, + pci_bar_start(pdev, PCI_BASE_ADDRESS_0), + SKGE_REG_SIZE); if (!hw->regs) { DBG(PFX "cannot map device registers\n"); goto err_out_free_hw; diff --git a/src/drivers/net/sky2.c b/src/drivers/net/sky2.c index 211f22466..9d612c997 100644 --- a/src/drivers/net/sky2.c +++ b/src/drivers/net/sky2.c @@ -1112,10 +1112,10 @@ nomem: /* Free the le and ring buffers */ static void sky2_free_rings(struct sky2_port *sky2) { - free_dma(sky2->rx_le, RX_LE_BYTES); + free_phys(sky2->rx_le, RX_LE_BYTES); free(sky2->rx_ring); - free_dma(sky2->tx_le, TX_RING_SIZE * sizeof(struct sky2_tx_le)); + free_phys(sky2->tx_le, TX_RING_SIZE * sizeof(struct sky2_tx_le)); free(sky2->tx_ring); sky2->tx_le = NULL; @@ -1137,7 +1137,7 @@ static int sky2_up(struct net_device *dev) netdev_link_down(dev); /* must be power of 2 */ - sky2->tx_le = malloc_dma(TX_RING_SIZE * sizeof(struct sky2_tx_le), TX_RING_ALIGN); + sky2->tx_le = malloc_phys(TX_RING_SIZE * sizeof(struct sky2_tx_le), TX_RING_ALIGN); sky2->tx_le_map = virt_to_bus(sky2->tx_le); if (!sky2->tx_le) goto err_out; @@ -1149,7 +1149,7 @@ static int sky2_up(struct net_device *dev) tx_init(sky2); - sky2->rx_le = malloc_dma(RX_LE_BYTES, RX_RING_ALIGN); + sky2->rx_le = malloc_phys(RX_LE_BYTES, RX_RING_ALIGN); sky2->rx_le_map = virt_to_bus(sky2->rx_le); if (!sky2->rx_le) goto err_out; @@ -2278,14 +2278,14 @@ static int sky2_probe(struct pci_device *pdev) hw->pdev = pdev; - hw->regs = (unsigned long)ioremap(pci_bar_start(pdev, PCI_BASE_ADDRESS_0), 0x4000); + hw->regs = (unsigned long)pci_ioremap(pdev, pci_bar_start(pdev, PCI_BASE_ADDRESS_0), 0x4000); if (!hw->regs) { DBG(PFX "cannot map device registers\n"); goto err_out_free_hw; } /* ring for status responses */ - hw->st_le = malloc_dma(STATUS_LE_BYTES, STATUS_RING_ALIGN); + hw->st_le = malloc_phys(STATUS_LE_BYTES, STATUS_RING_ALIGN); if (!hw->st_le) goto err_out_iounmap; hw->st_dma = virt_to_bus(hw->st_le); @@ -2344,7 +2344,7 @@ err_out_free_netdev: netdev_put(dev); err_out_free_pci: sky2_write8(hw, B0_CTST, CS_RST_SET); - free_dma(hw->st_le, STATUS_LE_BYTES); + free_phys(hw->st_le, STATUS_LE_BYTES); err_out_iounmap: iounmap((void *)hw->regs); err_out_free_hw: @@ -2373,7 +2373,7 @@ static void sky2_remove(struct pci_device *pdev) sky2_write8(hw, B0_CTST, CS_RST_SET); sky2_read8(hw, B0_CTST); - free_dma(hw->st_le, STATUS_LE_BYTES); + free_phys(hw->st_le, STATUS_LE_BYTES); for (i = hw->ports-1; i >= 0; --i) { netdev_nullify(hw->dev[i]); diff --git a/src/drivers/net/tg3/tg3.c b/src/drivers/net/tg3/tg3.c index 1bed06649..cec599c1c 100644 --- a/src/drivers/net/tg3/tg3.c +++ b/src/drivers/net/tg3/tg3.c @@ -42,7 
+42,7 @@ void tg3_rx_prodring_fini(struct tg3_rx_prodring_set *tpr) { DBGP("%s\n", __func__); if (tpr->rx_std) { - free_dma(tpr->rx_std, TG3_RX_STD_RING_BYTES(tp)); + free_phys(tpr->rx_std, TG3_RX_STD_RING_BYTES(tp)); tpr->rx_std = NULL; } } @@ -55,7 +55,7 @@ static void tg3_free_consistent(struct tg3 *tp) { DBGP("%s\n", __func__); if (tp->tx_ring) { - free_dma(tp->tx_ring, TG3_TX_RING_BYTES); + free_phys(tp->tx_ring, TG3_TX_RING_BYTES); tp->tx_ring = NULL; } @@ -63,7 +63,7 @@ static void tg3_free_consistent(struct tg3 *tp) tp->tx_buffers = NULL; if (tp->rx_rcb) { - free_dma(tp->rx_rcb, TG3_RX_RCB_RING_BYTES(tp)); + free_phys(tp->rx_rcb, TG3_RX_RCB_RING_BYTES(tp)); tp->rx_rcb_mapping = 0; tp->rx_rcb = NULL; } @@ -71,7 +71,7 @@ static void tg3_free_consistent(struct tg3 *tp) tg3_rx_prodring_fini(&tp->prodring); if (tp->hw_status) { - free_dma(tp->hw_status, TG3_HW_STATUS_SIZE); + free_phys(tp->hw_status, TG3_HW_STATUS_SIZE); tp->status_mapping = 0; tp->hw_status = NULL; } @@ -87,7 +87,7 @@ int tg3_alloc_consistent(struct tg3 *tp) struct tg3_hw_status *sblk; struct tg3_rx_prodring_set *tpr = &tp->prodring; - tp->hw_status = malloc_dma(TG3_HW_STATUS_SIZE, TG3_DMA_ALIGNMENT); + tp->hw_status = malloc_phys(TG3_HW_STATUS_SIZE, TG3_DMA_ALIGNMENT); if (!tp->hw_status) { DBGC(tp->dev, "hw_status alloc failed\n"); goto err_out; @@ -97,7 +97,7 @@ int tg3_alloc_consistent(struct tg3 *tp) memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); sblk = tp->hw_status; - tpr->rx_std = malloc_dma(TG3_RX_STD_RING_BYTES(tp), TG3_DMA_ALIGNMENT); + tpr->rx_std = malloc_phys(TG3_RX_STD_RING_BYTES(tp), TG3_DMA_ALIGNMENT); if (!tpr->rx_std) { DBGC(tp->dev, "rx prodring alloc failed\n"); goto err_out; @@ -109,7 +109,7 @@ int tg3_alloc_consistent(struct tg3 *tp) if (!tp->tx_buffers) goto err_out; - tp->tx_ring = malloc_dma(TG3_TX_RING_BYTES, TG3_DMA_ALIGNMENT); + tp->tx_ring = malloc_phys(TG3_TX_RING_BYTES, TG3_DMA_ALIGNMENT); if (!tp->tx_ring) goto err_out; tp->tx_desc_mapping = virt_to_bus(tp->tx_ring); @@ -123,7 +123,7 @@ int tg3_alloc_consistent(struct tg3 *tp) tp->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; - tp->rx_rcb = malloc_dma(TG3_RX_RCB_RING_BYTES(tp), TG3_DMA_ALIGNMENT); + tp->rx_rcb = malloc_phys(TG3_RX_RCB_RING_BYTES(tp), TG3_DMA_ALIGNMENT); if (!tp->rx_rcb) goto err_out; tp->rx_rcb_mapping = virt_to_bus(tp->rx_rcb); @@ -541,7 +541,7 @@ static int tg3_test_dma(struct tg3 *tp) u32 *buf; int ret = 0; - buf = malloc_dma(TEST_BUFFER_SIZE, TG3_DMA_ALIGNMENT); + buf = malloc_phys(TEST_BUFFER_SIZE, TG3_DMA_ALIGNMENT); if (!buf) { ret = -ENOMEM; goto out_nofree; @@ -708,7 +708,7 @@ static int tg3_test_dma(struct tg3 *tp) } out: - free_dma(buf, TEST_BUFFER_SIZE); + free_phys(buf, TEST_BUFFER_SIZE); out_nofree: return ret; } @@ -771,7 +771,7 @@ static int tg3_init_one(struct pci_device *pdev) reg_base = pci_bar_start(pdev, PCI_BASE_ADDRESS_0); reg_size = pci_bar_size(pdev, PCI_BASE_ADDRESS_0); - tp->regs = ioremap(reg_base, reg_size); + tp->regs = pci_ioremap(pdev, reg_base, reg_size); if (!tp->regs) { DBGC(&pdev->dev, "Failed to remap device registers\n"); errno = -ENOENT; diff --git a/src/drivers/net/thunderx.c b/src/drivers/net/thunderx.c index 9ddb98ab8..1865a9b91 100644 --- a/src/drivers/net/thunderx.c +++ b/src/drivers/net/thunderx.c @@ -645,11 +645,11 @@ static void txnic_poll ( struct txnic *vnic ) { /** * Allocate virtual NIC * - * @v dev Underlying device + * @v pci Underlying PCI device * @v membase Register base address * @ret vnic Virtual NIC, or NULL on failure */ -static struct txnic * txnic_alloc ( 
struct device *dev, +static struct txnic * txnic_alloc ( struct pci_device *pci, unsigned long membase ) { struct net_device *netdev; struct txnic *vnic; @@ -658,10 +658,10 @@ static struct txnic * txnic_alloc ( struct device *dev, netdev = alloc_etherdev ( sizeof ( *vnic ) ); if ( ! netdev ) goto err_alloc_netdev; - netdev->dev = dev; + netdev->dev = &pci->dev; vnic = netdev->priv; vnic->netdev = netdev; - vnic->name = dev->name; + vnic->name = pci->dev.name; /* Allow caller to reuse netdev->priv. (The generic virtual * NIC code never assumes that netdev->priv==vnic.) @@ -684,7 +684,7 @@ static struct txnic * txnic_alloc ( struct device *dev, goto err_alloc_rq; /* Map registers */ - vnic->regs = ioremap ( membase, TXNIC_VF_BAR_SIZE ); + vnic->regs = pci_ioremap ( pci, membase, TXNIC_VF_BAR_SIZE ); if ( ! vnic->regs ) goto err_ioremap; @@ -1103,7 +1103,7 @@ static int txnic_lmac_probe ( struct txnic_lmac *lmac ) { membase = ( pf->vf_membase + ( lmac->idx * pf->vf_stride ) ); /* Allocate and initialise network device */ - vnic = txnic_alloc ( &bgx->pci->dev, membase ); + vnic = txnic_alloc ( bgx->pci, membase ); if ( ! vnic ) { rc = -ENOMEM; goto err_alloc; @@ -1275,7 +1275,7 @@ static int txnic_pf_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map registers */ - pf->regs = ioremap ( membase, TXNIC_PF_BAR_SIZE ); + pf->regs = pci_ioremap ( pci, membase, TXNIC_PF_BAR_SIZE ); if ( ! pf->regs ) { rc = -ENODEV; goto err_ioremap; @@ -1633,7 +1633,7 @@ static int txnic_bgx_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map registers */ - bgx->regs = ioremap ( membase, TXNIC_BGX_BAR_SIZE ); + bgx->regs = pci_ioremap ( pci, membase, TXNIC_BGX_BAR_SIZE ); if ( ! bgx->regs ) { rc = -ENODEV; goto err_ioremap; diff --git a/src/drivers/net/velocity.c b/src/drivers/net/velocity.c index 0a2a3ac10..373714293 100644 --- a/src/drivers/net/velocity.c +++ b/src/drivers/net/velocity.c @@ -320,7 +320,8 @@ static int velocity_alloc_rings ( struct velocity_nic *vlc ) { vlc->rx_prod = 0; vlc->rx_cons = 0; vlc->rx_commit = 0; - vlc->rx_ring = malloc_dma ( VELOCITY_RXDESC_SIZE, VELOCITY_RING_ALIGN ); + vlc->rx_ring = malloc_phys ( VELOCITY_RXDESC_SIZE, + VELOCITY_RING_ALIGN ); if ( ! vlc->rx_ring ) return -ENOMEM; @@ -332,7 +333,8 @@ static int velocity_alloc_rings ( struct velocity_nic *vlc ) { /* Allocate TX descriptor ring */ vlc->tx_prod = 0; vlc->tx_cons = 0; - vlc->tx_ring = malloc_dma ( VELOCITY_TXDESC_SIZE, VELOCITY_RING_ALIGN ); + vlc->tx_ring = malloc_phys ( VELOCITY_TXDESC_SIZE, + VELOCITY_RING_ALIGN ); if ( ! 
vlc->tx_ring ) { rc = -ENOMEM; goto err_tx_alloc; @@ -356,7 +358,7 @@ static int velocity_alloc_rings ( struct velocity_nic *vlc ) { return 0; err_tx_alloc: - free_dma ( vlc->rx_ring, VELOCITY_RXDESC_SIZE ); + free_phys ( vlc->rx_ring, VELOCITY_RXDESC_SIZE ); return rc; } @@ -482,7 +484,7 @@ static void velocity_close ( struct net_device *netdev ) { writew ( 0, vlc->regs + VELOCITY_RXDESCNUM ); /* Destroy RX ring */ - free_dma ( vlc->rx_ring, VELOCITY_RXDESC_SIZE ); + free_phys ( vlc->rx_ring, VELOCITY_RXDESC_SIZE ); vlc->rx_ring = NULL; vlc->rx_prod = 0; vlc->rx_cons = 0; @@ -499,7 +501,7 @@ static void velocity_close ( struct net_device *netdev ) { writew ( 0, vlc->regs + VELOCITY_TXDESCNUM ); /* Destroy TX ring */ - free_dma ( vlc->tx_ring, VELOCITY_TXDESC_SIZE ); + free_phys ( vlc->tx_ring, VELOCITY_TXDESC_SIZE ); vlc->tx_ring = NULL; vlc->tx_prod = 0; vlc->tx_cons = 0; @@ -731,7 +733,7 @@ static int velocity_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map registers */ - vlc->regs = ioremap ( pci->membase, VELOCITY_BAR_SIZE ); + vlc->regs = pci_ioremap ( pci, pci->membase, VELOCITY_BAR_SIZE ); vlc->netdev = netdev; /* Reset the NIC */ diff --git a/src/drivers/net/vmxnet3.c b/src/drivers/net/vmxnet3.c index 6a54dbf89..63bcf0e01 100644 --- a/src/drivers/net/vmxnet3.c +++ b/src/drivers/net/vmxnet3.c @@ -465,7 +465,8 @@ static int vmxnet3_open ( struct net_device *netdev ) { int rc; /* Allocate DMA areas */ - vmxnet->dma = malloc_dma ( sizeof ( *vmxnet->dma ), VMXNET3_DMA_ALIGN ); + vmxnet->dma = malloc_phys ( sizeof ( *vmxnet->dma ), + VMXNET3_DMA_ALIGN ); if ( ! vmxnet->dma ) { DBGC ( vmxnet, "VMXNET3 %p could not allocate DMA area\n", vmxnet ); @@ -542,7 +543,7 @@ static int vmxnet3_open ( struct net_device *netdev ) { err_activate: vmxnet3_flush_tx ( netdev ); vmxnet3_flush_rx ( netdev ); - free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) ); + free_phys ( vmxnet->dma, sizeof ( *vmxnet->dma ) ); err_alloc_dma: return rc; } @@ -559,7 +560,7 @@ static void vmxnet3_close ( struct net_device *netdev ) { vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV ); vmxnet3_flush_tx ( netdev ); vmxnet3_flush_rx ( netdev ); - free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) ); + free_phys ( vmxnet->dma, sizeof ( *vmxnet->dma ) ); } /** vmxnet3 net device operations */ @@ -641,14 +642,14 @@ static int vmxnet3_probe ( struct pci_device *pci ) { adjust_pci_device ( pci ); /* Map PCI BARs */ - vmxnet->pt = ioremap ( pci_bar_start ( pci, VMXNET3_PT_BAR ), - VMXNET3_PT_LEN ); + vmxnet->pt = pci_ioremap ( pci, pci_bar_start ( pci, VMXNET3_PT_BAR ), + VMXNET3_PT_LEN ); if ( ! vmxnet->pt ) { rc = -ENODEV; goto err_ioremap_pt; } - vmxnet->vd = ioremap ( pci_bar_start ( pci, VMXNET3_VD_BAR ), - VMXNET3_VD_LEN ); + vmxnet->vd = pci_ioremap ( pci, pci_bar_start ( pci, VMXNET3_VD_BAR ), + VMXNET3_VD_LEN ); if ( ! 
vmxnet->vd ) { rc = -ENODEV; goto err_ioremap_vd; diff --git a/src/drivers/net/vxge/vxge_config.c b/src/drivers/net/vxge/vxge_config.c index ba62b508e..f4d217097 100644 --- a/src/drivers/net/vxge/vxge_config.c +++ b/src/drivers/net/vxge/vxge_config.c @@ -624,10 +624,10 @@ __vxge_hw_ring_create(struct __vxge_hw_virtualpath *vpath, hldev = vpath->hldev; vp_id = vpath->vp_id; - ring->rxdl = malloc_dma(sizeof(struct __vxge_hw_ring_block), + ring->rxdl = malloc_phys(sizeof(struct __vxge_hw_ring_block), sizeof(struct __vxge_hw_ring_block)); if (!ring->rxdl) { - vxge_debug(VXGE_ERR, "%s:%d malloc_dma error\n", + vxge_debug(VXGE_ERR, "%s:%d malloc_phys error\n", __func__, __LINE__); status = VXGE_HW_ERR_OUT_OF_MEMORY; goto exit; @@ -667,7 +667,7 @@ enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_ring *ring) } if (ring->rxdl) { - free_dma(ring->rxdl, sizeof(struct __vxge_hw_ring_block)); + free_phys(ring->rxdl, sizeof(struct __vxge_hw_ring_block)); ring->rxdl = NULL; } ring->rxd_offset = 0; @@ -826,10 +826,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_virtualpath *vpath, fifo->tx_intr_num = (vpath->vp_id * VXGE_HW_MAX_INTR_PER_VP) + VXGE_HW_VPATH_INTR_TX; - fifo->txdl = malloc_dma(sizeof(struct vxge_hw_fifo_txd) + fifo->txdl = malloc_phys(sizeof(struct vxge_hw_fifo_txd) * fifo->depth, fifo->depth); if (!fifo->txdl) { - vxge_debug(VXGE_ERR, "%s:%d malloc_dma error\n", + vxge_debug(VXGE_ERR, "%s:%d malloc_phys error\n", __func__, __LINE__); return VXGE_HW_ERR_OUT_OF_MEMORY; } @@ -846,7 +846,7 @@ enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_fifo *fifo) vxge_trace(); if (fifo->txdl) - free_dma(fifo->txdl, + free_phys(fifo->txdl, sizeof(struct vxge_hw_fifo_txd) * fifo->depth); fifo->txdl = NULL; diff --git a/src/drivers/net/vxge/vxge_main.c b/src/drivers/net/vxge/vxge_main.c index 8b099c0e2..631928318 100644 --- a/src/drivers/net/vxge/vxge_main.c +++ b/src/drivers/net/vxge/vxge_main.c @@ -520,7 +520,7 @@ vxge_probe(struct pci_device *pdev) /* sets the bus master */ adjust_pci_device(pdev); - bar0 = ioremap(mmio_start, mmio_len); + bar0 = pci_ioremap(pdev, mmio_start, mmio_len); if (!bar0) { vxge_debug(VXGE_ERR, "%s : cannot remap io memory bar0\n", __func__); diff --git a/src/drivers/usb/ehci.c b/src/drivers/usb/ehci.c index cd3967070..77022a47d 100644 --- a/src/drivers/usb/ehci.c +++ b/src/drivers/usb/ehci.c @@ -565,8 +565,8 @@ static int ehci_ring_alloc ( struct ehci_device *ehci, } /* Allocate queue head */ - ring->head = malloc_dma ( sizeof ( *ring->head ), - ehci_align ( sizeof ( *ring->head ) ) ); + ring->head = malloc_phys ( sizeof ( *ring->head ), + ehci_align ( sizeof ( *ring->head ) ) ); if ( ! ring->head ) { rc = -ENOMEM; goto err_alloc_queue; @@ -579,7 +579,7 @@ static int ehci_ring_alloc ( struct ehci_device *ehci, /* Allocate transfer descriptors */ len = ( EHCI_RING_COUNT * sizeof ( ring->desc[0] ) ); - ring->desc = malloc_dma ( len, sizeof ( ring->desc[0] ) ); + ring->desc = malloc_phys ( len, sizeof ( ring->desc[0] ) ); if ( ! 
ring->desc ) { rc = -ENOMEM; goto err_alloc_desc; @@ -607,10 +607,10 @@ static int ehci_ring_alloc ( struct ehci_device *ehci, return 0; err_unreachable_desc: - free_dma ( ring->desc, len ); + free_phys ( ring->desc, len ); err_alloc_desc: err_unreachable_queue: - free_dma ( ring->head, sizeof ( *ring->head ) ); + free_phys ( ring->head, sizeof ( *ring->head ) ); err_alloc_queue: free ( ring->iobuf ); err_alloc_iobuf: @@ -631,10 +631,11 @@ static void ehci_ring_free ( struct ehci_ring *ring ) { assert ( ring->iobuf[i] == NULL ); /* Free transfer descriptors */ - free_dma ( ring->desc, ( EHCI_RING_COUNT * sizeof ( ring->desc[0] ) ) ); + free_phys ( ring->desc, ( EHCI_RING_COUNT * + sizeof ( ring->desc[0] ) ) ); /* Free queue head */ - free_dma ( ring->head, sizeof ( *ring->head ) ); + free_phys ( ring->head, sizeof ( *ring->head ) ); /* Free I/O buffers */ free ( ring->iobuf ); @@ -1552,8 +1553,7 @@ static void ehci_hub_close ( struct usb_hub *hub __unused ) { * @ret rc Return status code */ static int ehci_root_open ( struct usb_hub *hub ) { - struct usb_bus *bus = hub->bus; - struct ehci_device *ehci = usb_bus_get_hostdata ( bus ); + struct ehci_device *ehci = usb_hub_get_drvdata ( hub ); uint32_t portsc; unsigned int i; @@ -1571,9 +1571,6 @@ static int ehci_root_open ( struct usb_hub *hub ) { /* Wait 20ms after potentially enabling power to a port */ mdelay ( EHCI_PORT_POWER_DELAY_MS ); - /* Record hub driver private data */ - usb_hub_set_drvdata ( hub, ehci ); - return 0; } @@ -1587,9 +1584,6 @@ static void ehci_root_close ( struct usb_hub *hub ) { /* Route all ports back to companion controllers */ writel ( 0, ehci->op + EHCI_OP_CONFIGFLAG ); - - /* Clear hub driver private data */ - usb_hub_set_drvdata ( hub, NULL ); } /** @@ -1794,8 +1788,8 @@ static int ehci_bus_open ( struct usb_bus *bus ) { assert ( list_empty ( &ehci->periodic ) ); /* Allocate and initialise asynchronous queue head */ - ehci->head = malloc_dma ( sizeof ( *ehci->head ), - ehci_align ( sizeof ( *ehci->head ) ) ); + ehci->head = malloc_phys ( sizeof ( *ehci->head ), + ehci_align ( sizeof ( *ehci->head ) ) ); if ( ! ehci->head ) { rc = -ENOMEM; goto err_alloc_head; @@ -1823,7 +1817,7 @@ static int ehci_bus_open ( struct usb_bus *bus ) { /* Allocate periodic frame list */ frames = EHCI_PERIODIC_FRAMES ( ehci->flsize ); len = ( frames * sizeof ( ehci->frame[0] ) ); - ehci->frame = malloc_dma ( len, EHCI_PAGE_ALIGN ); + ehci->frame = malloc_phys ( len, EHCI_PAGE_ALIGN ); if ( ! 
ehci->frame ) { rc = -ENOMEM; goto err_alloc_frame; @@ -1843,10 +1837,10 @@ static int ehci_bus_open ( struct usb_bus *bus ) { ehci_stop ( ehci ); err_unreachable_frame: - free_dma ( ehci->frame, len ); + free_phys ( ehci->frame, len ); err_alloc_frame: err_ctrldssegment: - free_dma ( ehci->head, sizeof ( *ehci->head ) ); + free_phys ( ehci->head, sizeof ( *ehci->head ) ); err_alloc_head: return rc; } @@ -1868,10 +1862,10 @@ static void ehci_bus_close ( struct usb_bus *bus ) { ehci_stop ( ehci ); /* Free periodic frame list */ - free_dma ( ehci->frame, ( frames * sizeof ( ehci->frame[0] ) ) ); + free_phys ( ehci->frame, ( frames * sizeof ( ehci->frame[0] ) ) ); /* Free asynchronous schedule */ - free_dma ( ehci->head, sizeof ( *ehci->head ) ); + free_phys ( ehci->head, sizeof ( *ehci->head ) ); } /** @@ -1996,7 +1990,7 @@ static int ehci_probe ( struct pci_device *pci ) { /* Map registers */ bar_start = pci_bar_start ( pci, EHCI_BAR ); bar_size = pci_bar_size ( pci, EHCI_BAR ); - ehci->regs = ioremap ( bar_start, bar_size ); + ehci->regs = pci_ioremap ( pci, bar_start, bar_size ); if ( ! ehci->regs ) { rc = -ENODEV; goto err_ioremap; diff --git a/src/drivers/usb/uhci.c b/src/drivers/usb/uhci.c index 528c1be1d..47474bdc7 100644 --- a/src/drivers/usb/uhci.c +++ b/src/drivers/usb/uhci.c @@ -179,7 +179,7 @@ static int uhci_ring_alloc ( struct uhci_ring *ring ) { memset ( ring, 0, sizeof ( *ring ) ); /* Allocate queue head */ - ring->head = malloc_dma ( sizeof ( *ring->head ), UHCI_ALIGN ); + ring->head = malloc_phys ( sizeof ( *ring->head ), UHCI_ALIGN ); if ( ! ring->head ) { rc = -ENOMEM; goto err_alloc; @@ -194,7 +194,7 @@ static int uhci_ring_alloc ( struct uhci_ring *ring ) { return 0; err_unreachable: - free_dma ( ring->head, sizeof ( *ring->head ) ); + free_phys ( ring->head, sizeof ( *ring->head ) ); err_alloc: return rc; } @@ -213,7 +213,7 @@ static void uhci_ring_free ( struct uhci_ring *ring ) { assert ( ring->xfer[i] == NULL ); /* Free queue head */ - free_dma ( ring->head, sizeof ( *ring->head ) ); + free_phys ( ring->head, sizeof ( *ring->head ) ); } /** @@ -263,7 +263,7 @@ static int uhci_enqueue ( struct uhci_ring *ring, struct io_buffer *iobuf, /* Allocate transfer descriptors */ len = ( count * sizeof ( xfer->desc[0] ) ); - xfer->desc = malloc_dma ( len, UHCI_ALIGN ); + xfer->desc = malloc_phys ( len, UHCI_ALIGN ); if ( ! 
xfer->desc ) { rc = -ENOMEM; goto err_alloc_desc; @@ -299,7 +299,7 @@ static int uhci_enqueue ( struct uhci_ring *ring, struct io_buffer *iobuf, return 0; err_unreachable_desc: - free_dma ( xfer->desc, len ); + free_phys ( xfer->desc, len ); err_alloc_desc: free ( xfer ); err_alloc_xfer: @@ -377,7 +377,7 @@ static struct io_buffer * uhci_dequeue ( struct uhci_ring *ring ) { /* Free transfer descriptors */ len = ( xfer->prod * sizeof ( xfer->desc[0] ) ); - free_dma ( xfer->desc, len ); + free_phys ( xfer->desc, len ); /* Free transfer */ free ( xfer ); @@ -1124,13 +1124,9 @@ static void uhci_hub_close ( struct usb_hub *hub __unused ) { * @v hub USB hub * @ret rc Return status code */ -static int uhci_root_open ( struct usb_hub *hub ) { - struct usb_bus *bus = hub->bus; - struct uhci_device *uhci = usb_bus_get_hostdata ( bus ); - - /* Record hub driver private data */ - usb_hub_set_drvdata ( hub, uhci ); +static int uhci_root_open ( struct usb_hub *hub __unused) { + /* Nothing to do */ return 0; } @@ -1139,10 +1135,9 @@ static int uhci_root_open ( struct usb_hub *hub ) { * * @v hub USB hub */ -static void uhci_root_close ( struct usb_hub *hub ) { +static void uhci_root_close ( struct usb_hub *hub __unused ) { - /* Clear hub driver private data */ - usb_hub_set_drvdata ( hub, NULL ); + /* Nothing to do */ } /** @@ -1317,7 +1312,7 @@ static int uhci_bus_open ( struct usb_bus *bus ) { assert ( list_empty ( &uhci->periodic ) ); /* Allocate and initialise asynchronous queue head */ - uhci->head = malloc_dma ( sizeof ( *uhci->head ), UHCI_ALIGN ); + uhci->head = malloc_phys ( sizeof ( *uhci->head ), UHCI_ALIGN ); if ( ! uhci->head ) { rc = -ENOMEM; goto err_alloc_head; @@ -1329,8 +1324,8 @@ static int uhci_bus_open ( struct usb_bus *bus ) { uhci_async_schedule ( uhci ); /* Allocate periodic frame list */ - uhci->frame = malloc_dma ( sizeof ( *uhci->frame ), - sizeof ( *uhci->frame ) ); + uhci->frame = malloc_phys ( sizeof ( *uhci->frame ), + sizeof ( *uhci->frame ) ); if ( ! uhci->frame ) { rc = -ENOMEM; goto err_alloc_frame; @@ -1348,10 +1343,10 @@ static int uhci_bus_open ( struct usb_bus *bus ) { uhci_stop ( uhci ); err_unreachable_frame: - free_dma ( uhci->frame, sizeof ( *uhci->frame ) ); + free_phys ( uhci->frame, sizeof ( *uhci->frame ) ); err_alloc_frame: err_unreachable_head: - free_dma ( uhci->head, sizeof ( *uhci->head ) ); + free_phys ( uhci->head, sizeof ( *uhci->head ) ); err_alloc_head: return rc; } @@ -1372,10 +1367,10 @@ static void uhci_bus_close ( struct usb_bus *bus ) { uhci_stop ( uhci ); /* Free periodic frame list */ - free_dma ( uhci->frame, sizeof ( *uhci->frame ) ); + free_phys ( uhci->frame, sizeof ( *uhci->frame ) ); /* Free asynchronous schedule */ - free_dma ( uhci->head, sizeof ( *uhci->head ) ); + free_phys ( uhci->head, sizeof ( *uhci->head ) ); } /** diff --git a/src/drivers/usb/usbblk.c b/src/drivers/usb/usbblk.c new file mode 100644 index 000000000..5a086d3f8 --- /dev/null +++ b/src/drivers/usb/usbblk.c @@ -0,0 +1,912 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "usbblk.h" + +/** @file + * + * USB mass storage driver + * + */ + +static void usbblk_stop ( struct usbblk_device *usbblk, int rc ); + +/** List of USB block devices */ +static LIST_HEAD ( usbblk_devices ); + +/****************************************************************************** + * + * Endpoint management + * + ****************************************************************************** + */ + +/** + * Open endpoints + * + * @v usbblk USB block device + * @ret rc Return status code + */ +static int usbblk_open ( struct usbblk_device *usbblk ) { + struct usb_device *usb = usbblk->func->usb; + unsigned int interface = usbblk->func->interface[0]; + int rc; + + /* Sanity checks */ + assert ( ! usbblk->in.open ); + assert ( ! usbblk->out.open ); + + /* Issue reset */ + if ( ( rc = usb_control ( usb, USBBLK_RESET, 0, interface, + NULL, 0 ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s could not issue reset: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err_reset; + } + + /* Open bulk OUT endpoint */ + if ( ( rc = usb_endpoint_open ( &usbblk->out ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s could not open bulk OUT: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err_open_out; + } + + /* Clear any bulk OUT halt condition */ + if ( ( rc = usb_endpoint_clear_halt ( &usbblk->out ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s could not reset bulk OUT: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err_clear_out; + } + + /* Open bulk IN endpoint */ + if ( ( rc = usb_endpoint_open ( &usbblk->in ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s could not open bulk IN: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err_open_in; + } + + /* Clear any bulk IN halt condition */ + if ( ( rc = usb_endpoint_clear_halt ( &usbblk->in ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s could not reset bulk IN: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err_clear_in; + } + + return 0; + + err_clear_in: + usb_endpoint_close ( &usbblk->in ); + err_open_in: + err_clear_out: + usb_endpoint_close ( &usbblk->out ); + err_open_out: + err_reset: + return rc; +} + +/** + * Close endpoints + * + * @v usbblk USB block device + */ +static void usbblk_close ( struct usbblk_device *usbblk ) { + + /* Close bulk OUT endpoint */ + if ( usbblk->out.open ) + usb_endpoint_close ( &usbblk->out ); + + /* Close bulk IN endpoint */ + if ( usbblk->in.open ) + usb_endpoint_close ( &usbblk->in ); +} + +/****************************************************************************** + * + * Bulk OUT endpoint + * + ****************************************************************************** + */ + +/** + * Issue bulk OUT command + * + * @v usbblk USB block device + * @ret rc Return status code + */ +static int usbblk_out_command ( struct usbblk_device *usbblk ) { + struct usbblk_command *cmd = &usbblk->cmd; + struct usbblk_command_wrapper *wrapper; + struct io_buffer *iobuf; + int rc; + + 
/* Sanity checks */ + assert ( cmd->tag ); + assert ( ! ( cmd->scsi.data_in_len && cmd->scsi.data_out_len ) ); + + /* Allocate I/O buffer */ + iobuf = alloc_iob ( sizeof ( *wrapper ) ); + if ( ! iobuf ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Populate command */ + wrapper = iob_put ( iobuf, sizeof ( *wrapper ) ); + memset ( wrapper, 0, sizeof ( *wrapper ) ); + wrapper->signature = cpu_to_le32 ( USBBLK_COMMAND_SIGNATURE ); + wrapper->tag = cmd->tag; /* non-endian */ + if ( cmd->scsi.data_out_len ) { + wrapper->len = cpu_to_le32 ( cmd->scsi.data_out_len ); + } else { + wrapper->len = cpu_to_le32 ( cmd->scsi.data_in_len ); + wrapper->flags = USB_DIR_IN; + } + wrapper->lun = ntohs ( cmd->scsi.lun.u16[0] ); + wrapper->cblen = sizeof ( wrapper->cb ); + memcpy ( wrapper->cb, &cmd->scsi.cdb, sizeof ( wrapper->cb ) ); + + /* Issue command */ + if ( ( rc = usb_stream ( &usbblk->out, iobuf, 0 ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s bulk OUT could not issue command: " + "%s\n", usbblk->func->name, strerror ( rc ) ); + goto err_stream; + } + + return 0; + + err_stream: + free_iob ( iobuf ); + err_alloc: + return rc; +} + +/** + * Send bulk OUT data block + * + * @v usbblk USB block device + * @ret rc Return status code + */ +static int usbblk_out_data ( struct usbblk_device *usbblk ) { + struct usbblk_command *cmd = &usbblk->cmd; + struct io_buffer *iobuf; + size_t len; + int rc; + + /* Calculate length */ + assert ( cmd->tag ); + assert ( cmd->scsi.data_out != UNULL ); + assert ( cmd->offset < cmd->scsi.data_out_len ); + len = ( cmd->scsi.data_out_len - cmd->offset ); + if ( len > USBBLK_MAX_LEN ) + len = USBBLK_MAX_LEN; + assert ( ( len % usbblk->out.mtu ) == 0 ); + + /* Allocate I/O buffer */ + iobuf = alloc_iob ( len ); + if ( ! iobuf ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Populate I/O buffer */ + copy_from_user ( iob_put ( iobuf, len ), cmd->scsi.data_out, + cmd->offset, len ); + + /* Send data */ + if ( ( rc = usb_stream ( &usbblk->out, iobuf, 0 ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s bulk OUT could not send data: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err_stream; + } + + /* Consume data */ + cmd->offset += len; + + return 0; + + err_stream: + free_iob ( iobuf ); + err_alloc: + return rc; +} + +/** + * Refill bulk OUT endpoint + * + * @v usbblk USB block device + * @ret rc Return status code + */ +static int usbblk_out_refill ( struct usbblk_device *usbblk ) { + struct usbblk_command *cmd = &usbblk->cmd; + int rc; + + /* Sanity checks */ + assert ( cmd->tag ); + + /* Refill endpoint */ + while ( ( cmd->offset < cmd->scsi.data_out_len ) && + ( usbblk->out.fill < USBBLK_MAX_FILL ) ) { + if ( ( rc = usbblk_out_data ( usbblk ) ) != 0 ) + return rc; + } + + return 0; +} + +/** + * Complete bulk OUT transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void usbblk_out_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct usbblk_device *usbblk = + container_of ( ep, struct usbblk_device, out ); + struct usbblk_command *cmd = &usbblk->cmd; + + /* Ignore cancellations after closing endpoint */ + if ( ! 
ep->open ) + goto drop; + + /* Sanity check */ + assert ( cmd->tag ); + + /* Check for failures */ + if ( rc != 0 ) { + DBGC ( usbblk, "USBBLK %s bulk OUT failed: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err; + } + + /* Trigger refill process, if applicable */ + if ( cmd->offset < cmd->scsi.data_out_len ) + process_add ( &usbblk->process ); + + drop: + /* Free I/O buffer */ + free_iob ( iobuf ); + + return; + + err: + free_iob ( iobuf ); + usbblk_stop ( usbblk, rc ); +} + +/** Bulk OUT endpoint operations */ +static struct usb_endpoint_driver_operations usbblk_out_operations = { + .complete = usbblk_out_complete, +}; + +/****************************************************************************** + * + * Bulk IN endpoint + * + ****************************************************************************** + */ + +/** + * Handle bulk IN data block + * + * @v usbblk USB block device + * @v data Data block + * @v len Length of data + * @ret rc Return status code + */ +static int usbblk_in_data ( struct usbblk_device *usbblk, const void *data, + size_t len ) { + struct usbblk_command *cmd = &usbblk->cmd; + + /* Sanity checks */ + assert ( cmd->tag ); + assert ( cmd->scsi.data_in != UNULL ); + assert ( cmd->offset <= cmd->scsi.data_in_len ); + assert ( len <= ( cmd->scsi.data_in_len - cmd->offset ) ); + + /* Store data */ + copy_to_user ( cmd->scsi.data_in, cmd->offset, data, len ); + cmd->offset += len; + + return 0; +} + +/** + * Handle bulk IN status + * + * @v usbblk USB block device + * @v data Status data + * @v len Length of status data + * @ret rc Return status code + */ +static int usbblk_in_status ( struct usbblk_device *usbblk, const void *data, + size_t len ) { + struct usbblk_command *cmd = &usbblk->cmd; + const struct usbblk_status_wrapper *stat; + + /* Sanity checks */ + assert ( cmd->tag ); + + /* Validate length */ + if ( len < sizeof ( *stat ) ) { + DBGC ( usbblk, "USBBLK %s bulk IN malformed status:\n", + usbblk->func->name ); + DBGC_HDA ( usbblk, 0, data, len ); + return -EIO; + } + stat = data; + + /* Validate signature */ + if ( stat->signature != cpu_to_le32 ( USBBLK_STATUS_SIGNATURE ) ) { + DBGC ( usbblk, "USBBLK %s bulk IN invalid signature %08x:\n", + usbblk->func->name, le32_to_cpu ( stat->signature ) ); + DBGC_HDA ( usbblk, 0, stat, sizeof ( *stat ) ); + return -EIO; + } + + /* Validate tag */ + if ( stat->tag != cmd->tag ) { + DBGC ( usbblk, "USBBLK %s bulk IN tag mismatch (got %08x, " + "expected %08x):\n", + usbblk->func->name, stat->tag, cmd->tag ); + DBGC_HDA ( usbblk, 0, stat, sizeof ( *stat ) ); + return -EIO; + } + + /* Check status */ + if ( stat->status ) { + DBGC ( usbblk, "USBBLK %s bulk IN status %02x:\n", + usbblk->func->name, stat->status ); + DBGC_HDA ( usbblk, 0, stat, sizeof ( *stat ) ); + return -EIO; + } + + /* Check for residual data */ + if ( stat->residue ) { + DBGC ( usbblk, "USBBLK %s bulk IN residue %#x:\n", + usbblk->func->name, le32_to_cpu ( stat->residue ) ); + return -EIO; + } + + /* Mark command as complete */ + usbblk_stop ( usbblk, 0 ); + + return 0; +} + +/** + * Refill bulk IN endpoint + * + * @v usbblk USB block device + * @ret rc Return status code + */ +static int usbblk_in_refill ( struct usbblk_device *usbblk ) { + struct usbblk_command *cmd = &usbblk->cmd; + struct usbblk_status_wrapper *stat; + size_t remaining; + unsigned int max; + int rc; + + /* Sanity checks */ + assert ( cmd->tag ); + + /* Calculate maximum required refill */ + remaining = sizeof ( *stat ); + if ( cmd->scsi.data_in_len ) { + assert ( 
cmd->offset <= cmd->scsi.data_in_len ); + remaining += ( cmd->scsi.data_in_len - cmd->offset ); + } + max = ( ( remaining + USBBLK_MAX_LEN - 1 ) / USBBLK_MAX_LEN ); + + /* Refill bulk IN endpoint */ + if ( ( rc = usb_refill_limit ( &usbblk->in, max ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Complete bulk IN transfer + * + * @v ep USB endpoint + * @v iobuf I/O buffer + * @v rc Completion status code + */ +static void usbblk_in_complete ( struct usb_endpoint *ep, + struct io_buffer *iobuf, int rc ) { + struct usbblk_device *usbblk = + container_of ( ep, struct usbblk_device, in ); + struct usbblk_command *cmd = &usbblk->cmd; + size_t remaining; + size_t len; + + /* Ignore cancellations after closing endpoint */ + if ( ! ep->open ) + goto drop; + + /* Sanity check */ + assert ( cmd->tag ); + + /* Handle errors */ + if ( rc != 0 ) { + DBGC ( usbblk, "USBBLK %s bulk IN failed: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err; + } + + /* Trigger refill process */ + process_add ( &usbblk->process ); + + /* Handle data portion, if any */ + if ( cmd->scsi.data_in_len ) { + assert ( cmd->offset <= cmd->scsi.data_in_len ); + remaining = ( cmd->scsi.data_in_len - cmd->offset ); + len = iob_len ( iobuf ); + if ( len > remaining ) + len = remaining; + if ( len ) { + if ( ( rc = usbblk_in_data ( usbblk, iobuf->data, + len ) ) != 0 ) + goto err; + iob_pull ( iobuf, len ); + } + } + + /* Handle status portion, if any */ + len = iob_len ( iobuf ); + if ( len ) { + if ( ( rc = usbblk_in_status ( usbblk, iobuf->data, + len ) ) != 0 ) + goto err; + } + + drop: + /* Free I/O buffer */ + free_iob ( iobuf ); + + return; + + err: + free_iob ( iobuf ); + usbblk_stop ( usbblk, rc ); +} + +/** Bulk IN endpoint operations */ +static struct usb_endpoint_driver_operations usbblk_in_operations = { + .complete = usbblk_in_complete, +}; + +/****************************************************************************** + * + * Refill process + * + ****************************************************************************** + */ + +/** + * Refill endpoints + * + * @v usbblk USB block device + */ +static void usbblk_step ( struct usbblk_device *usbblk ) { + + /* Refill bulk OUT endpoint */ + usbblk_out_refill ( usbblk ); + + /* Refill bulk IN endpoint */ + usbblk_in_refill ( usbblk ); +} + +/** Refill process descriptor */ +static struct process_descriptor usbblk_process_desc = + PROC_DESC ( struct usbblk_device, process, usbblk_step ); + +/****************************************************************************** + * + * SCSI command management + * + ****************************************************************************** + */ + +/** Next command tag */ +static uint16_t usbblk_tag; + +/** + * Stop SCSI command + * + * @v usbblk USB block device + * @v rc Reason for stop + */ +static void usbblk_stop ( struct usbblk_device *usbblk, int rc ) { + + /* Stop process */ + process_del ( &usbblk->process ); + + /* Reset command */ + memset ( &usbblk->cmd, 0, sizeof ( usbblk->cmd ) ); + + /* Close endpoints if an error occurred */ + if ( rc != 0 ) { + DBGC ( usbblk, "USBBLK %s closing for error recovery\n", + usbblk->func->name ); + usbblk_close ( usbblk ); + } + + /* Terminate command */ + intf_restart ( &usbblk->data, rc ); +} + +/** + * Start new SCSI command + * + * @v usbblk USB block device + * @v scsicmd SCSI command + * @ret rc Return status code + */ +static int usbblk_start ( struct usbblk_device *usbblk, + struct scsi_cmd *scsicmd ) { + struct usbblk_command *cmd = &usbblk->cmd; + int rc; + + 
/* Fail if command is in progress */ + if ( cmd->tag ) { + rc = -EBUSY; + DBGC ( usbblk, "USBBLK %s cannot support multiple commands\n", + usbblk->func->name ); + goto err_busy; + } + + /* Refuse bidirectional commands */ + if ( scsicmd->data_in_len && scsicmd->data_out_len ) { + rc = -EOPNOTSUPP; + DBGC ( usbblk, "USBBLK %s cannot support bidirectional " + "commands\n", usbblk->func->name ); + goto err_bidirectional; + } + + /* Sanity checks */ + assert ( ! process_running ( &usbblk->process ) ); + assert ( cmd->offset == 0 ); + + /* Initialise command */ + memcpy ( &cmd->scsi, scsicmd, sizeof ( cmd->scsi ) ); + cmd->tag = ( USBBLK_TAG_MAGIC | ++usbblk_tag ); + + /* Issue bulk OUT command */ + if ( ( rc = usbblk_out_command ( usbblk ) ) != 0 ) + goto err_command; + + /* Start refill process */ + process_add ( &usbblk->process ); + + return 0; + + err_command: + memset ( &usbblk->cmd, 0, sizeof ( usbblk->cmd ) ); + err_bidirectional: + err_busy: + return rc; +} + +/****************************************************************************** + * + * SCSI interfaces + * + ****************************************************************************** + */ + +/** SCSI data interface operations */ +static struct interface_operation usbblk_data_operations[] = { + INTF_OP ( intf_close, struct usbblk_device *, usbblk_stop ), +}; + +/** SCSI data interface descriptor */ +static struct interface_descriptor usbblk_data_desc = + INTF_DESC ( struct usbblk_device, data, usbblk_data_operations ); + +/** + * Check SCSI command flow-control window + * + * @v usbblk USB block device + * @ret len Length of window + */ +static size_t usbblk_scsi_window ( struct usbblk_device *usbblk ) { + struct usbblk_command *cmd = &usbblk->cmd; + + /* Allow a single command if no command is currently in progress */ + return ( cmd->tag ? 0 : 1 ); +} + +/** + * Issue SCSI command + * + * @v usbblk USB block device + * @v data SCSI data interface + * @v scsicmd SCSI command + * @ret tag Command tag, or negative error + */ +static int usbblk_scsi_command ( struct usbblk_device *usbblk, + struct interface *data, + struct scsi_cmd *scsicmd ) { + struct usbblk_command *cmd = &usbblk->cmd; + int rc; + + /* (Re)open endpoints if needed */ + if ( ( ! 
usbblk->in.open ) && ( ( rc = usbblk_open ( usbblk ) ) != 0 ) ) + goto err_open; + + /* Start new command */ + if ( ( rc = usbblk_start ( usbblk, scsicmd ) ) != 0 ) + goto err_start; + + /* Attach to parent interface and return */ + intf_plug_plug ( &usbblk->data, data ); + return cmd->tag; + + usbblk_stop ( usbblk, rc ); + err_start: + usbblk_close ( usbblk ); + err_open: + return rc; +} + +/** + * Close SCSI interface + * + * @v usbblk USB block device + * @v rc Reason for close + */ +static void usbblk_scsi_close ( struct usbblk_device *usbblk, int rc ) { + + /* Restart interfaces */ + intfs_restart ( rc, &usbblk->scsi, &usbblk->data, NULL ); + + /* Stop any in-progress command */ + usbblk_stop ( usbblk, rc ); + + /* Close endpoints */ + usbblk_close ( usbblk ); + + /* Flag as closed */ + usbblk->opened = 0; +} + +/** + * Describe as an EFI device path + * + * @v usbblk USB block device + * @ret path EFI device path, or NULL on error + */ +static EFI_DEVICE_PATH_PROTOCOL * +usbblk_efi_describe ( struct usbblk_device *usbblk ) { + + return efi_usb_path ( usbblk->func ); +} + +/** SCSI command interface operations */ +static struct interface_operation usbblk_scsi_operations[] = { + INTF_OP ( scsi_command, struct usbblk_device *, usbblk_scsi_command ), + INTF_OP ( xfer_window, struct usbblk_device *, usbblk_scsi_window ), + INTF_OP ( intf_close, struct usbblk_device *, usbblk_scsi_close ), + EFI_INTF_OP ( efi_describe, struct usbblk_device *, + usbblk_efi_describe ), +}; + +/** SCSI command interface descriptor */ +static struct interface_descriptor usbblk_scsi_desc = + INTF_DESC ( struct usbblk_device, scsi, usbblk_scsi_operations ); + +/****************************************************************************** + * + * SAN device interface + * + ****************************************************************************** + */ + +/** + * Find USB block device + * + * @v name USB block device name + * @ret usbblk USB block device, or NULL + */ +static struct usbblk_device * usbblk_find ( const char *name ) { + struct usbblk_device *usbblk; + + /* Look for matching device */ + list_for_each_entry ( usbblk, &usbblk_devices, list ) { + if ( strcmp ( usbblk->func->name, name ) == 0 ) + return usbblk; + } + + return NULL; +} + +/** + * Open USB block device URI + * + * @v parent Parent interface + * @v uri URI + * @ret rc Return status code + */ +static int usbblk_open_uri ( struct interface *parent, struct uri *uri ) { + static struct scsi_lun lun; + struct usbblk_device *usbblk; + int rc; + + /* Sanity check */ + if ( ! uri->opaque ) + return -EINVAL; + + /* Find matching device */ + usbblk = usbblk_find ( uri->opaque ); + if ( ! 
usbblk ) + return -ENOENT; + + /* Fail if device is already open */ + if ( usbblk->opened ) + return -EBUSY; + + /* Open SCSI device */ + if ( ( rc = scsi_open ( parent, &usbblk->scsi, &lun ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s could not open SCSI device: %s\n", + usbblk->func->name, strerror ( rc ) ); + return rc; + } + + /* Mark as opened */ + usbblk->opened = 1; + + return 0; +} + +/** USB block device URI opener */ +struct uri_opener usbblk_uri_opener __uri_opener = { + .scheme = "usb", + .open = usbblk_open_uri, +}; + +/****************************************************************************** + * + * USB interface + * + ****************************************************************************** + */ + +/** + * Probe device + * + * @v func USB function + * @v config Configuration descriptor + * @ret rc Return status code + */ +static int usbblk_probe ( struct usb_function *func, + struct usb_configuration_descriptor *config ) { + struct usb_device *usb = func->usb; + struct usbblk_device *usbblk; + struct usb_interface_descriptor *desc; + int rc; + + /* Allocate and initialise structure */ + usbblk = zalloc ( sizeof ( *usbblk ) ); + if ( ! usbblk ) { + rc = -ENOMEM; + goto err_alloc; + } + usbblk->func = func; + usb_endpoint_init ( &usbblk->out, usb, &usbblk_out_operations ); + usb_endpoint_init ( &usbblk->in, usb, &usbblk_in_operations ); + usb_refill_init ( &usbblk->in, 0, USBBLK_MAX_LEN, USBBLK_MAX_FILL ); + intf_init ( &usbblk->scsi, &usbblk_scsi_desc, &usbblk->refcnt ); + intf_init ( &usbblk->data, &usbblk_data_desc, &usbblk->refcnt ); + process_init_stopped ( &usbblk->process, &usbblk_process_desc, + &usbblk->refcnt ); + + /* Locate interface descriptor */ + desc = usb_interface_descriptor ( config, func->interface[0], 0 ); + if ( ! 
desc ) { + DBGC ( usbblk, "USBBLK %s missing interface descriptor\n", + usbblk->func->name ); + rc = -ENOENT; + goto err_desc; + } + + /* Describe endpoints */ + if ( ( rc = usb_endpoint_described ( &usbblk->out, config, desc, + USB_BULK_OUT, 0 ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s could not describe bulk OUT: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err_out; + } + if ( ( rc = usb_endpoint_described ( &usbblk->in, config, desc, + USB_BULK_IN, 0 ) ) != 0 ) { + DBGC ( usbblk, "USBBLK %s could not describe bulk IN: %s\n", + usbblk->func->name, strerror ( rc ) ); + goto err_in; + } + + /* Add to list of devices */ + list_add_tail ( &usbblk->list, &usbblk_devices ); + + usb_func_set_drvdata ( func, usbblk ); + return 0; + + err_in: + err_out: + err_desc: + ref_put ( &usbblk->refcnt ); + err_alloc: + return rc; +} + +/** + * Remove device + * + * @v func USB function + */ +static void usbblk_remove ( struct usb_function *func ) { + struct usbblk_device *usbblk = usb_func_get_drvdata ( func ); + + /* Remove from list of devices */ + list_del ( &usbblk->list ); + + /* Close all interfaces */ + usbblk_scsi_close ( usbblk, -ENODEV ); + + /* Shut down interfaces */ + intfs_shutdown ( -ENODEV, &usbblk->scsi, &usbblk->data, NULL ); + + /* Drop reference */ + ref_put ( &usbblk->refcnt ); +} + +/** Mass storage class device IDs */ +static struct usb_device_id usbblk_ids[] = { + { + .name = "usbblk", + .vendor = USB_ANY_ID, + .product = USB_ANY_ID, + }, +}; + +/** Mass storage driver */ +struct usb_driver usbblk_driver __usb_driver = { + .ids = usbblk_ids, + .id_count = ( sizeof ( usbblk_ids ) / sizeof ( usbblk_ids[0] ) ), + .class = USB_CLASS_ID ( USB_CLASS_MSC, USB_SUBCLASS_MSC_SCSI, + USB_PROTOCOL_MSC_BULK ), + .score = USB_SCORE_NORMAL, + .probe = usbblk_probe, + .remove = usbblk_remove, +}; diff --git a/src/drivers/usb/usbblk.h b/src/drivers/usb/usbblk.h new file mode 100644 index 000000000..65d0705e3 --- /dev/null +++ b/src/drivers/usb/usbblk.h @@ -0,0 +1,121 @@ +#ifndef _USBBLK_H +#define _USBBLK_H + +/** @file + * + * USB mass storage driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** Mass storage class code */ +#define USB_CLASS_MSC 0x08 + +/** SCSI command set subclass code */ +#define USB_SUBCLASS_MSC_SCSI 0x06 + +/** Bulk-only transport protocol */ +#define USB_PROTOCOL_MSC_BULK 0x50 + +/** Mass storage reset command */ +#define USBBLK_RESET ( USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE | \ + USB_REQUEST_TYPE ( 255 ) ) + +/** Command block wrapper */ +struct usbblk_command_wrapper { + /** Signature */ + uint32_t signature; + /** Tag */ + uint32_t tag; + /** Data transfer length */ + uint32_t len; + /** Flags */ + uint8_t flags; + /** LUN */ + uint8_t lun; + /** Command block length */ + uint8_t cblen; + /** Command block */ + uint8_t cb[16]; +} __attribute__ (( packed )); + +/** Command block wrapper signature */ +#define USBBLK_COMMAND_SIGNATURE 0x43425355UL + +/** Command status wrapper */ +struct usbblk_status_wrapper { + /** Signature */ + uint32_t signature; + /** Tag */ + uint32_t tag; + /** Data residue */ + uint32_t residue; + /** Status */ + uint8_t status; +} __attribute__ (( packed )); + +/** Command status wrapper signature */ +#define USBBLK_STATUS_SIGNATURE 0x53425355UL + +/** A USB mass storage command */ +struct usbblk_command { + /** SCSI command */ + struct scsi_cmd scsi; + /** Command tag (0 for no command in progress) */ + uint32_t tag; + /** Offset within data buffer */ + size_t offset; +}; + 
+/** A USB mass storage device */ +struct usbblk_device { + /** Reference count */ + struct refcnt refcnt; + /** List of devices */ + struct list_head list; + + /** USB function */ + struct usb_function *func; + /** Bulk OUT endpoint */ + struct usb_endpoint out; + /** Bulk IN endpoint */ + struct usb_endpoint in; + + /** SCSI command-issuing interface */ + struct interface scsi; + /** SCSI data interface */ + struct interface data; + /** Command process */ + struct process process; + /** Device opened flag */ + int opened; + + /** Current command (if any) */ + struct usbblk_command cmd; +}; + +/** Command tag magic + * + * This is a policy decision. + */ +#define USBBLK_TAG_MAGIC 0x18ae0000 + +/** Maximum length of USB data block + * + * This is a policy decision. + */ +#define USBBLK_MAX_LEN 2048 + +/** Maximum endpoint fill level + * + * This is a policy decision. + */ +#define USBBLK_MAX_FILL 4 + +#endif /* _USBBLK_H */ diff --git a/src/drivers/usb/usbhub.c b/src/drivers/usb/usbhub.c index 47914bcdb..28d6cb33d 100644 --- a/src/drivers/usb/usbhub.c +++ b/src/drivers/usb/usbhub.c @@ -110,6 +110,10 @@ static void hub_complete ( struct usb_endpoint *ep, } done: + + /* Recycle I/O buffer */ + usb_recycle ( &hubdev->intr, iobuf ); + /* Start refill process */ process_add ( &hubdev->refill ); } @@ -243,8 +247,10 @@ static int hub_disable ( struct usb_hub *hub, struct usb_port *port ) { int rc; /* Disable port */ - if ( ( rc = usb_hub_clear_port_feature ( usb, port->address, - USB_HUB_PORT_ENABLE, 0 ) )!=0){ + if ( ( hub->protocol < USB_PROTO_3_0 ) && + ( ( rc = usb_hub_clear_port_feature ( usb, port->address, + USB_HUB_PORT_ENABLE, + 0 ) ) != 0 ) ) { DBGC ( hubdev, "HUB %s port %d could not disable: %s\n", hubdev->name, port->address, strerror ( rc ) ); return rc; diff --git a/src/drivers/usb/usbio.c b/src/drivers/usb/usbio.c index dfb93dab1..278b43cd3 100644 --- a/src/drivers/usb/usbio.c +++ b/src/drivers/usb/usbio.c @@ -29,6 +29,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include #include #include #include @@ -206,7 +207,7 @@ static int usbio_open ( struct usbio_device *usbio, unsigned int interface ) { path = usbio->path; usbpath = usbio->usbpath; usbpath->InterfaceNumber = interface; - end = efi_devpath_end ( path ); + end = efi_path_end ( path ); /* Locate handle for this endpoint's interface */ if ( ( efirc = bs->LocateDevicePath ( &efi_usb_io_protocol_guid, &path, @@ -1503,7 +1504,7 @@ static int usbio_path ( struct usbio_device *usbio ) { path = u.interface; /* Locate end of device path and sanity check */ - len = efi_devpath_len ( path ); + len = efi_path_len ( path ); if ( len < sizeof ( *usbpath ) ) { DBGC ( usbio, "USBIO %s underlength device path\n", efi_handle_name ( handle ) ); diff --git a/src/drivers/usb/xhci.c b/src/drivers/usb/xhci.c index e9a7f4c65..cc48af033 100644 --- a/src/drivers/usb/xhci.c +++ b/src/drivers/usb/xhci.c @@ -31,7 +31,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include #include #include #include @@ -294,9 +293,9 @@ static void xhci_init ( struct xhci_device *xhci, void *regs ) { /* Read structural parameters 2 */ hcsparams2 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS2 ); - xhci->scratchpads = XHCI_HCSPARAMS2_SCRATCHPADS ( hcsparams2 ); + xhci->scratch.count = XHCI_HCSPARAMS2_SCRATCHPADS ( hcsparams2 ); DBGC2 ( xhci, "XHCI %s needs %d scratchpads\n", - xhci->name, xhci->scratchpads ); + xhci->name, xhci->scratch.count ); /* Read capability parameters 1 */ hccparams1 = readl ( xhci->cap + 
XHCI_CAP_HCCPARAMS1 ); @@ -918,27 +917,29 @@ static int xhci_dcbaa_alloc ( struct xhci_device *xhci ) { * align on its own size (rounded up to a power of two and * with a minimum of 64 bytes). */ - len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) ); - xhci->dcbaa = malloc_dma ( len, xhci_align ( len ) ); - if ( ! xhci->dcbaa ) { + len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa.context[0] ) ); + xhci->dcbaa.context = dma_alloc ( xhci->dma, &xhci->dcbaa.map, len, + xhci_align ( len ) ); + if ( ! xhci->dcbaa.context ) { DBGC ( xhci, "XHCI %s could not allocate DCBAA\n", xhci->name ); rc = -ENOMEM; goto err_alloc; } - memset ( xhci->dcbaa, 0, len ); + memset ( xhci->dcbaa.context, 0, len ); /* Program DCBAA pointer */ - dcbaap = virt_to_phys ( xhci->dcbaa ); + dcbaap = dma ( &xhci->dcbaa.map, xhci->dcbaa.context ); if ( ( rc = xhci_writeq ( xhci, dcbaap, xhci->op + XHCI_OP_DCBAAP ) ) != 0 ) goto err_writeq; - DBGC2 ( xhci, "XHCI %s DCBAA at [%08lx,%08lx)\n", - xhci->name, dcbaap, ( dcbaap + len ) ); + DBGC2 ( xhci, "XHCI %s DCBAA at [%08lx,%08lx)\n", xhci->name, + virt_to_phys ( xhci->dcbaa.context ), + ( virt_to_phys ( xhci->dcbaa.context ) + len ) ); return 0; err_writeq: - free_dma ( xhci->dcbaa, len ); + dma_free ( &xhci->dcbaa.map, xhci->dcbaa.context, len ); err_alloc: return rc; } @@ -954,14 +955,14 @@ static void xhci_dcbaa_free ( struct xhci_device *xhci ) { /* Sanity check */ for ( i = 0 ; i <= xhci->slots ; i++ ) - assert ( xhci->dcbaa[i] == 0 ); + assert ( xhci->dcbaa.context[i] == 0 ); /* Clear DCBAA pointer */ xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_DCBAAP ); /* Free DCBAA */ - len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) ); - free_dma ( xhci->dcbaa, len ); + len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa.context[0] ) ); + dma_free ( &xhci->dcbaa.map, xhci->dcbaa.context, len ); } /****************************************************************************** @@ -978,32 +979,34 @@ static void xhci_dcbaa_free ( struct xhci_device *xhci ) { * @ret rc Return status code */ static int xhci_scratchpad_alloc ( struct xhci_device *xhci ) { + struct xhci_scratchpad *scratch = &xhci->scratch; + size_t buffer_len; size_t array_len; - size_t len; - physaddr_t phys; + physaddr_t addr; unsigned int i; int rc; /* Do nothing if no scratchpad buffers are used */ - if ( ! xhci->scratchpads ) + if ( ! scratch->count ) return 0; - /* Allocate scratchpads */ - len = ( xhci->scratchpads * xhci->pagesize ); - xhci->scratchpad = umalloc ( len ); - if ( ! xhci->scratchpad ) { + /* Allocate scratchpad buffers */ + buffer_len = ( scratch->count * xhci->pagesize ); + scratch->buffer = dma_umalloc ( xhci->dma, &scratch->buffer_map, + buffer_len, xhci->pagesize ); + if ( ! scratch->buffer ) { DBGC ( xhci, "XHCI %s could not allocate scratchpad buffers\n", xhci->name ); rc = -ENOMEM; goto err_alloc; } - memset_user ( xhci->scratchpad, 0, 0, len ); + memset_user ( scratch->buffer, 0, 0, buffer_len ); /* Allocate scratchpad array */ - array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] )); - xhci->scratchpad_array = - malloc_dma ( array_len, xhci_align ( array_len ) ); - if ( ! xhci->scratchpad_array ) { + array_len = ( scratch->count * sizeof ( scratch->array[0] ) ); + scratch->array = dma_alloc ( xhci->dma, &scratch->array_map, + array_len, xhci_align ( array_len ) ); + if ( ! 
scratch->array ) { DBGC ( xhci, "XHCI %s could not allocate scratchpad buffer " "array\n", xhci->name ); rc = -ENOMEM; @@ -1011,25 +1014,28 @@ static int xhci_scratchpad_alloc ( struct xhci_device *xhci ) { } /* Populate scratchpad array */ - for ( i = 0 ; i < xhci->scratchpads ; i++ ) { - phys = user_to_phys ( xhci->scratchpad, ( i * xhci->pagesize )); - xhci->scratchpad_array[i] = phys; + addr = dma_phys ( &scratch->buffer_map, + user_to_phys ( scratch->buffer, 0 ) ); + for ( i = 0 ; i < scratch->count ; i++ ) { + scratch->array[i] = cpu_to_le64 ( addr ); + addr += xhci->pagesize; } /* Set scratchpad array pointer */ - assert ( xhci->dcbaa != NULL ); - xhci->dcbaa[0] = cpu_to_le64 ( virt_to_phys ( xhci->scratchpad_array )); + assert ( xhci->dcbaa.context != NULL ); + xhci->dcbaa.context[0] = cpu_to_le64 ( dma ( &scratch->array_map, + scratch->array ) ); DBGC2 ( xhci, "XHCI %s scratchpad [%08lx,%08lx) array [%08lx,%08lx)\n", - xhci->name, user_to_phys ( xhci->scratchpad, 0 ), - user_to_phys ( xhci->scratchpad, len ), - virt_to_phys ( xhci->scratchpad_array ), - ( virt_to_phys ( xhci->scratchpad_array ) + array_len ) ); + xhci->name, user_to_phys ( scratch->buffer, 0 ), + user_to_phys ( scratch->buffer, buffer_len ), + virt_to_phys ( scratch->array ), + ( virt_to_phys ( scratch->array ) + array_len ) ); return 0; - free_dma ( xhci->scratchpad_array, array_len ); + dma_free ( &scratch->array_map, scratch->array, array_len ); err_alloc_array: - ufree ( xhci->scratchpad ); + dma_ufree ( &scratch->buffer_map, scratch->buffer, buffer_len ); err_alloc: return rc; } @@ -1040,22 +1046,25 @@ static int xhci_scratchpad_alloc ( struct xhci_device *xhci ) { * @v xhci xHCI device */ static void xhci_scratchpad_free ( struct xhci_device *xhci ) { + struct xhci_scratchpad *scratch = &xhci->scratch; size_t array_len; + size_t buffer_len; /* Do nothing if no scratchpad buffers are used */ - if ( ! xhci->scratchpads ) + if ( ! scratch->count ) return; /* Clear scratchpad array pointer */ - assert ( xhci->dcbaa != NULL ); - xhci->dcbaa[0] = 0; + assert ( xhci->dcbaa.context != NULL ); + xhci->dcbaa.context[0] = 0; /* Free scratchpad array */ - array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] )); - free_dma ( xhci->scratchpad_array, array_len ); + array_len = ( scratch->count * sizeof ( scratch->array[0] ) ); + dma_free ( &scratch->array_map, scratch->array, array_len ); - /* Free scratchpads */ - ufree ( xhci->scratchpad ); + /* Free scratchpad buffers */ + buffer_len = ( scratch->count * xhci->pagesize ); + dma_ufree ( &scratch->buffer_map, scratch->buffer, buffer_len ); } /****************************************************************************** @@ -1202,7 +1211,8 @@ static int xhci_ring_alloc ( struct xhci_device *xhci, } /* Allocate TRBs */ - ring->trb = malloc_dma ( ring->len, xhci_align ( ring->len ) ); + ring->trb = dma_alloc ( xhci->dma, &ring->map, ring->len, + xhci_align ( ring->len ) ); if ( ! 
ring->trb ) { rc = -ENOMEM; goto err_alloc_trb; @@ -1211,14 +1221,14 @@ static int xhci_ring_alloc ( struct xhci_device *xhci, /* Initialise Link TRB */ link = &ring->trb[count].link; - link->next = cpu_to_le64 ( virt_to_phys ( ring->trb ) ); + link->next = cpu_to_le64 ( dma ( &ring->map, ring->trb ) ); link->flags = XHCI_TRB_TC; link->type = XHCI_TRB_LINK; ring->link = link; return 0; - free_dma ( ring->trb, ring->len ); + dma_free ( &ring->map, ring->trb, ring->len ); err_alloc_trb: free ( ring->iobuf ); err_alloc_iobuf: @@ -1256,7 +1266,7 @@ static void xhci_ring_free ( struct xhci_trb_ring *ring ) { assert ( ring->iobuf[i] == NULL ); /* Free TRBs */ - free_dma ( ring->trb, ring->len ); + dma_free ( &ring->map, ring->trb, ring->len ); /* Free I/O buffers */ free ( ring->iobuf ); @@ -1422,13 +1432,14 @@ static int xhci_command_alloc ( struct xhci_device *xhci ) { goto err_ring_alloc; /* Program command ring control register */ - crp = virt_to_phys ( xhci->command.trb ); + crp = dma ( &xhci->command.map, xhci->command.trb ); if ( ( rc = xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ), xhci->op + XHCI_OP_CRCR ) ) != 0 ) goto err_writeq; - DBGC2 ( xhci, "XHCI %s CRCR at [%08lx,%08lx)\n", - xhci->name, crp, ( crp + xhci->command.len ) ); + DBGC2 ( xhci, "XHCI %s CRCR at [%08lx,%08lx)\n", xhci->name, + virt_to_phys ( xhci->command.trb ), + ( virt_to_phys ( xhci->command.trb ) + xhci->command.len ) ); return 0; err_writeq: @@ -1469,7 +1480,8 @@ static int xhci_event_alloc ( struct xhci_device *xhci ) { /* Allocate event ring */ count = ( 1 << XHCI_EVENT_TRBS_LOG2 ); len = ( count * sizeof ( event->trb[0] ) ); - event->trb = malloc_dma ( len, xhci_align ( len ) ); + event->trb = dma_alloc ( xhci->dma, &event->trb_map, len, + xhci_align ( len ) ); if ( ! event->trb ) { rc = -ENOMEM; goto err_alloc_trb; @@ -1477,22 +1489,25 @@ static int xhci_event_alloc ( struct xhci_device *xhci ) { memset ( event->trb, 0, len ); /* Allocate event ring segment table */ - event->segment = malloc_dma ( sizeof ( event->segment[0] ), - xhci_align ( sizeof (event->segment[0]))); + event->segment = dma_alloc ( xhci->dma, &event->segment_map, + sizeof ( event->segment[0] ), + xhci_align ( sizeof (event->segment[0]))); if ( ! 
event->segment ) { rc = -ENOMEM; goto err_alloc_segment; } memset ( event->segment, 0, sizeof ( event->segment[0] ) ); - event->segment[0].base = cpu_to_le64 ( virt_to_phys ( event->trb ) ); + event->segment[0].base = cpu_to_le64 ( dma ( &event->trb_map, + event->trb ) ); event->segment[0].count = cpu_to_le32 ( count ); /* Program event ring registers */ writel ( 1, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) ); - if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->trb ), + if ( ( rc = xhci_writeq ( xhci, dma ( &event->trb_map, event->trb ), xhci->run + XHCI_RUN_ERDP ( 0 ) ) ) != 0 ) goto err_writeq_erdp; - if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->segment ), + if ( ( rc = xhci_writeq ( xhci, + dma ( &event->segment_map, event->segment ), xhci->run + XHCI_RUN_ERSTBA ( 0 ) ) ) != 0 ) goto err_writeq_erstba; @@ -1501,16 +1516,17 @@ static int xhci_event_alloc ( struct xhci_device *xhci ) { ( virt_to_phys ( event->trb ) + len ), virt_to_phys ( event->segment ), ( virt_to_phys ( event->segment ) + - sizeof (event->segment[0] ) ) ); + sizeof ( event->segment[0] ) ) ); return 0; xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) ); err_writeq_erstba: xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) ); err_writeq_erdp: - free_dma ( event->trb, len ); + dma_free ( &event->segment_map, event->segment, + sizeof ( event->segment[0] ) ); err_alloc_segment: - free_dma ( event->segment, sizeof ( event->segment[0] ) ); + dma_free ( &event->trb_map, event->trb, len ); err_alloc_trb: return rc; } @@ -1531,12 +1547,13 @@ static void xhci_event_free ( struct xhci_device *xhci ) { xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) ); /* Free event ring segment table */ - free_dma ( event->segment, sizeof ( event->segment[0] ) ); + dma_free ( &event->segment_map, event->segment, + sizeof ( event->segment[0] ) ); /* Free event ring */ count = ( 1 << XHCI_EVENT_TRBS_LOG2 ); len = ( count * sizeof ( event->trb[0] ) ); - free_dma ( event->trb, len ); + dma_free ( &event->trb_map, event->trb, len ); } /** @@ -1577,6 +1594,9 @@ static void xhci_transfer ( struct xhci_device *xhci, iobuf = xhci_dequeue_multi ( &endpoint->ring ); assert ( iobuf != NULL ); + /* Unmap I/O buffer */ + iob_unmap ( iobuf ); + /* Check for errors */ if ( ! 
( ( trb->code == XHCI_CMPLT_SUCCESS ) || ( trb->code == XHCI_CMPLT_SHORT ) ) ) { @@ -1745,7 +1765,7 @@ static void xhci_event_poll ( struct xhci_device *xhci ) { /* Update dequeue pointer if applicable */ if ( consumed ) { - xhci_writeq ( xhci, virt_to_phys ( trb ), + xhci_writeq ( xhci, dma ( &event->trb_map, trb ), xhci->run + XHCI_RUN_ERDP ( 0 ) ); profile_stop ( &xhci_event_profiler ); } @@ -1774,7 +1794,7 @@ static void xhci_abort ( struct xhci_device *xhci ) { /* Reset the command ring control register */ xhci_ring_reset ( &xhci->command ); - crp = virt_to_phys ( xhci->command.trb ); + crp = dma ( &xhci->command.map, xhci->command.trb ); xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ), xhci->op + XHCI_OP_CRCR ); } @@ -1793,6 +1813,13 @@ static int xhci_command ( struct xhci_device *xhci, union xhci_trb *trb ) { unsigned int i; int rc; + /* Sanity check */ + if ( xhci->pending ) { + DBGC ( xhci, "XHCI %s command ring busy\n", xhci->name ); + rc = -EBUSY; + goto err_pending; + } + /* Record the pending command */ xhci->pending = trb; @@ -1835,6 +1862,7 @@ static int xhci_command ( struct xhci_device *xhci, union xhci_trb *trb ) { err_enqueue: xhci->pending = NULL; + err_pending: return rc; } @@ -1855,9 +1883,13 @@ static inline int xhci_nop ( struct xhci_device *xhci ) { nop->type = XHCI_TRB_NOP_CMD; /* Issue command and wait for completion */ - if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) + if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) { + DBGC ( xhci, "XHCI %s NOP failed: %s\n", + xhci->name, strerror ( rc ) ); return rc; + } + DBGC2 ( xhci, "XHCI %s NOP completed successfully\n", xhci->name ); return 0; } @@ -1942,13 +1974,15 @@ static int xhci_context ( struct xhci_device *xhci, struct xhci_slot *slot, void *input ) ) { union xhci_trb trb; struct xhci_trb_context *context = &trb.context; + struct dma_mapping map; size_t len; void *input; int rc; /* Allocate an input context */ + memset ( &map, 0, sizeof ( map ) ); len = xhci_input_context_offset ( xhci, XHCI_CTX_END ); - input = malloc_dma ( len, xhci_align ( len ) ); + input = dma_alloc ( xhci->dma, &map, len, xhci_align ( len ) ); if ( ! 
input ) { rc = -ENOMEM; goto err_alloc; @@ -1961,7 +1995,7 @@ static int xhci_context ( struct xhci_device *xhci, struct xhci_slot *slot, /* Construct command */ memset ( context, 0, sizeof ( *context ) ); context->type = type; - context->input = cpu_to_le64 ( virt_to_phys ( input ) ); + context->input = cpu_to_le64 ( dma ( &map, input ) ); context->slot = slot->id; /* Issue command and wait for completion */ @@ -1969,7 +2003,7 @@ static int xhci_context ( struct xhci_device *xhci, struct xhci_slot *slot, goto err_command; err_command: - free_dma ( input, len ); + dma_free ( &map, input, len ); err_alloc: return rc; } @@ -1986,6 +2020,7 @@ static void xhci_address_device_input ( struct xhci_device *xhci, struct xhci_slot *slot, struct xhci_endpoint *endpoint, void *input ) { + struct xhci_trb_ring *ring = &endpoint->ring; struct xhci_control_context *control_ctx; struct xhci_slot_context *slot_ctx; struct xhci_endpoint_context *ep_ctx; @@ -2011,7 +2046,7 @@ static void xhci_address_device_input ( struct xhci_device *xhci, ep_ctx->type = XHCI_EP_TYPE_CONTROL; ep_ctx->burst = endpoint->ep->burst; ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu ); - ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) | + ep_ctx->dequeue = cpu_to_le64 ( dma ( &ring->map, ring->trb ) | XHCI_EP_DCS ); ep_ctx->trb_len = cpu_to_le16 ( XHCI_EP0_TRB_LEN ); } @@ -2032,15 +2067,18 @@ static inline int xhci_address_device ( struct xhci_device *xhci, /* Assign device address */ if ( ( rc = xhci_context ( xhci, slot, slot->endpoint[XHCI_CTX_EP0], XHCI_TRB_ADDRESS_DEVICE, - xhci_address_device_input ) ) != 0 ) + xhci_address_device_input ) ) != 0 ) { + DBGC ( xhci, "XHCI %s slot %d could not assign address: %s\n", + xhci->name, slot->id, strerror ( rc ) ); return rc; + } /* Get assigned address */ slot_ctx = ( slot->context + xhci_device_context_offset ( xhci, XHCI_CTX_SLOT ) ); usb->address = slot_ctx->address; - DBGC2 ( xhci, "XHCI %s assigned address %d to %s\n", - xhci->name, usb->address, usb->name ); + DBGC2 ( xhci, "XHCI %s slot %d assigned address %d to %s\n", + xhci->name, slot->id, usb->address, usb->name ); return 0; } @@ -2057,6 +2095,7 @@ static void xhci_configure_endpoint_input ( struct xhci_device *xhci, struct xhci_slot *slot, struct xhci_endpoint *endpoint, void *input ) { + struct xhci_trb_ring *ring = &endpoint->ring; struct xhci_control_context *control_ctx; struct xhci_slot_context *slot_ctx; struct xhci_endpoint_context *ep_ctx; @@ -2079,7 +2118,7 @@ static void xhci_configure_endpoint_input ( struct xhci_device *xhci, ep_ctx->type = endpoint->type; ep_ctx->burst = endpoint->ep->burst; ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu ); - ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) | + ep_ctx->dequeue = cpu_to_le64 ( dma ( &ring->map, ring->trb ) | XHCI_EP_DCS ); ep_ctx->trb_len = cpu_to_le16 ( endpoint->ep->mtu ); /* best guess */ } @@ -2100,8 +2139,11 @@ static inline int xhci_configure_endpoint ( struct xhci_device *xhci, /* Configure endpoint */ if ( ( rc = xhci_context ( xhci, slot, endpoint, XHCI_TRB_CONFIGURE_ENDPOINT, - xhci_configure_endpoint_input ) ) != 0 ) + xhci_configure_endpoint_input ) ) != 0 ) { + DBGC ( xhci, "XHCI %s slot %d ctx %d could not configure: %s\n", + xhci->name, slot->id, endpoint->ctx, strerror ( rc ) ); return rc; + } DBGC2 ( xhci, "XHCI %s slot %d ctx %d configured\n", xhci->name, slot->id, endpoint->ctx ); @@ -2151,8 +2193,12 @@ static inline int xhci_deconfigure_endpoint ( struct xhci_device *xhci, /* Deconfigure endpoint */ if ( 
( rc = xhci_context ( xhci, slot, endpoint, XHCI_TRB_CONFIGURE_ENDPOINT, - xhci_deconfigure_endpoint_input ) ) != 0 ) + xhci_deconfigure_endpoint_input ) ) != 0 ) { + DBGC ( xhci, "XHCI %s slot %d ctx %d could not deconfigure: " + "%s\n", xhci->name, slot->id, endpoint->ctx, + strerror ( rc ) ); return rc; + } DBGC2 ( xhci, "XHCI %s slot %d ctx %d deconfigured\n", xhci->name, slot->id, endpoint->ctx ); @@ -2206,8 +2252,12 @@ static inline int xhci_evaluate_context ( struct xhci_device *xhci, /* Configure endpoint */ if ( ( rc = xhci_context ( xhci, slot, endpoint, XHCI_TRB_EVALUATE_CONTEXT, - xhci_evaluate_context_input ) ) != 0 ) + xhci_evaluate_context_input ) ) != 0 ) { + DBGC ( xhci, "XHCI %s slot %d ctx %d could not (re-)evaluate: " + "%s\n", xhci->name, slot->id, endpoint->ctx, + strerror ( rc ) ); return rc; + } DBGC2 ( xhci, "XHCI %s slot %d ctx %d (re-)evaluated\n", xhci->name, slot->id, endpoint->ctx ); @@ -2297,6 +2347,7 @@ xhci_set_tr_dequeue_pointer ( struct xhci_device *xhci, unsigned int mask; unsigned int index; unsigned int dcs; + physaddr_t addr; int rc; /* Construct command */ @@ -2305,8 +2356,8 @@ xhci_set_tr_dequeue_pointer ( struct xhci_device *xhci, mask = ring->mask; dcs = ( ( ~( cons >> ring->shift ) ) & XHCI_EP_DCS ); index = ( cons & mask ); - dequeue->dequeue = - cpu_to_le64 ( virt_to_phys ( &ring->trb[index] ) | dcs ); + addr = dma ( &ring->map, &ring->trb[index] ); + dequeue->dequeue = cpu_to_le64 ( addr | dcs ); dequeue->slot = slot->id; dequeue->endpoint = endpoint->ctx; dequeue->type = XHCI_TRB_SET_TR_DEQUEUE_POINTER; @@ -2425,6 +2476,7 @@ static void xhci_endpoint_close ( struct usb_endpoint *ep ) { /* Cancel any incomplete transfers */ while ( xhci_ring_fill ( &endpoint->ring ) ) { iobuf = xhci_dequeue_multi ( &endpoint->ring ); + iob_unmap ( iobuf ); usb_complete_err ( ep, iobuf, -ECANCELED ); } @@ -2491,6 +2543,7 @@ static int xhci_endpoint_mtu ( struct usb_endpoint *ep ) { static int xhci_endpoint_message ( struct usb_endpoint *ep, struct io_buffer *iobuf ) { struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep ); + struct xhci_device *xhci = endpoint->xhci; struct usb_setup_packet *packet; unsigned int input; size_t len; @@ -2520,10 +2573,15 @@ static int xhci_endpoint_message ( struct usb_endpoint *ep, if ( len ) setup->direction = ( input ? XHCI_SETUP_IN : XHCI_SETUP_OUT ); + /* Map I/O buffer */ + if ( ( rc = iob_map ( iobuf, xhci->dma, len, + ( input ? DMA_RX : DMA_TX ) ) ) != 0 ) + goto err_map; + /* Construct data stage TRB, if applicable */ if ( len ) { data = &(trb++)->data; - data->data = cpu_to_le64 ( virt_to_phys ( iobuf->data ) ); + data->data = cpu_to_le64 ( iob_dma ( iobuf ) ); data->len = cpu_to_le32 ( len ); data->type = XHCI_TRB_DATA; data->direction = ( input ? 
XHCI_DATA_IN : XHCI_DATA_OUT ); @@ -2539,13 +2597,18 @@ static int xhci_endpoint_message ( struct usb_endpoint *ep, /* Enqueue TRBs */ if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs, ( trb - trbs ) ) ) != 0 ) - return rc; + goto err_enqueue; /* Ring the doorbell */ xhci_doorbell ( &endpoint->ring ); profile_stop ( &xhci_message_profiler ); return 0; + + err_enqueue: + iob_unmap ( iobuf ); + err_map: + return rc; } /** @@ -2579,12 +2642,13 @@ static unsigned int xhci_endpoint_count ( size_t len, int zlp ) { static int xhci_endpoint_stream ( struct usb_endpoint *ep, struct io_buffer *iobuf, int zlp ) { struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep ); - void *data = iobuf->data; + struct xhci_device *xhci = endpoint->xhci; size_t len = iob_len ( iobuf ); unsigned int count = xhci_endpoint_count ( len, zlp ); union xhci_trb trbs[count]; union xhci_trb *trb = trbs; struct xhci_trb_normal *normal; + physaddr_t data; unsigned int i; size_t trb_len; int rc; @@ -2592,6 +2656,13 @@ static int xhci_endpoint_stream ( struct usb_endpoint *ep, /* Profile stream transfers */ profile_start ( &xhci_stream_profiler ); + /* Map I/O buffer */ + if ( ( rc = iob_map ( iobuf, xhci->dma, len, + ( ( ep->address & USB_DIR_IN ) ? + DMA_RX : DMA_TX ) ) ) != 0 ) + goto err_map; + data = iob_dma ( iobuf ); + /* Construct normal TRBs */ memset ( &trbs, 0, sizeof ( trbs ) ); for ( i = 0 ; i < count ; i ++ ) { @@ -2603,7 +2674,7 @@ static int xhci_endpoint_stream ( struct usb_endpoint *ep, /* Construct normal TRB */ normal = &trb->normal; - normal->data = cpu_to_le64 ( virt_to_phys ( data ) ); + normal->data = cpu_to_le64 ( data ); normal->len = cpu_to_le32 ( trb_len ); normal->type = XHCI_TRB_NORMAL; normal->flags = XHCI_TRB_CH; @@ -2624,13 +2695,18 @@ static int xhci_endpoint_stream ( struct usb_endpoint *ep, /* Enqueue TRBs */ if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs, count ) ) != 0 ) - return rc; + goto err_enqueue; /* Ring the doorbell */ xhci_doorbell ( &endpoint->ring ); profile_stop ( &xhci_stream_profiler ); return 0; + + err_enqueue: + iob_unmap ( iobuf ); + err_map: + return rc; } /****************************************************************************** @@ -2693,7 +2769,8 @@ static int xhci_device_open ( struct usb_device *usb ) { /* Allocate a device context */ len = xhci_device_context_offset ( xhci, XHCI_CTX_END ); - slot->context = malloc_dma ( len, xhci_align ( len ) ); + slot->context = dma_alloc ( xhci->dma, &slot->map, len, + xhci_align ( len ) ); if ( ! 
slot->context ) { rc = -ENOMEM; goto err_alloc_context; @@ -2701,16 +2778,17 @@ static int xhci_device_open ( struct usb_device *usb ) { memset ( slot->context, 0, len ); /* Set device context base address */ - assert ( xhci->dcbaa[id] == 0 ); - xhci->dcbaa[id] = cpu_to_le64 ( virt_to_phys ( slot->context ) ); + assert ( xhci->dcbaa.context[id] == 0 ); + xhci->dcbaa.context[id] = cpu_to_le64 ( dma ( &slot->map, + slot->context ) ); DBGC2 ( xhci, "XHCI %s slot %d device context [%08lx,%08lx) for %s\n", xhci->name, slot->id, virt_to_phys ( slot->context ), ( virt_to_phys ( slot->context ) + len ), usb->name ); return 0; - xhci->dcbaa[id] = 0; - free_dma ( slot->context, len ); + xhci->dcbaa.context[id] = 0; + dma_free ( &slot->map, slot->context, len ); err_alloc_context: xhci->slot[id] = NULL; free ( slot ); @@ -2750,8 +2828,8 @@ static void xhci_device_close ( struct usb_device *usb ) { /* Free slot */ if ( slot->context ) { - free_dma ( slot->context, len ); - xhci->dcbaa[id] = 0; + dma_free ( &slot->map, slot->context, len ); + xhci->dcbaa.context[id] = 0; } xhci->slot[id] = NULL; free ( slot ); @@ -2944,8 +3022,7 @@ static void xhci_hub_close ( struct usb_hub *hub __unused ) { * @ret rc Return status code */ static int xhci_root_open ( struct usb_hub *hub ) { - struct usb_bus *bus = hub->bus; - struct xhci_device *xhci = usb_bus_get_hostdata ( bus ); + struct xhci_device *xhci = usb_hub_get_drvdata ( hub ); struct usb_port *port; uint32_t portsc; unsigned int i; @@ -2982,9 +3059,6 @@ static int xhci_root_open ( struct usb_hub *hub ) { */ mdelay ( XHCI_LINK_STATE_DELAY_MS ); - /* Record hub driver private data */ - usb_hub_set_drvdata ( hub, xhci ); - return 0; } @@ -2993,10 +3067,9 @@ static int xhci_root_open ( struct usb_hub *hub ) { * * @v hub USB hub */ -static void xhci_root_close ( struct usb_hub *hub ) { +static void xhci_root_close ( struct usb_hub *hub __unused ) { - /* Clear hub driver private data */ - usb_hub_set_drvdata ( hub, NULL ); + /* Nothing to do */ } /** @@ -3051,6 +3124,19 @@ static int xhci_root_disable ( struct usb_hub *hub, struct usb_port *port ) { portsc |= XHCI_PORTSC_PED; writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) ); + /* Allow time for link state to stabilise */ + mdelay ( XHCI_LINK_STATE_DELAY_MS ); + + /* Set link state to RxDetect for USB3 ports */ + if ( port->protocol >= USB_PROTO_3_0 ) { + portsc &= XHCI_PORTSC_PRESERVE; + portsc |= ( XHCI_PORTSC_PLS_RXDETECT | XHCI_PORTSC_LWS ); + writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) ); + } + + /* Allow time for link state to stabilise */ + mdelay ( XHCI_LINK_STATE_DELAY_MS ); + return 0; } @@ -3253,7 +3339,7 @@ static int xhci_probe ( struct pci_device *pci ) { /* Map registers */ bar_start = pci_bar_start ( pci, XHCI_BAR ); bar_size = pci_bar_size ( pci, XHCI_BAR ); - xhci->regs = ioremap ( bar_start, bar_size ); + xhci->regs = pci_ioremap ( pci, bar_start, bar_size ); if ( ! 
xhci->regs ) { rc = -ENODEV; goto err_ioremap; @@ -3262,6 +3348,11 @@ static int xhci_probe ( struct pci_device *pci ) { /* Initialise xHCI device */ xhci_init ( xhci, xhci->regs ); + /* Configure DMA device */ + xhci->dma = &pci->dma; + if ( xhci->addr64 ) + dma_set_mask_64bit ( xhci->dma ); + /* Initialise USB legacy support and claim ownership */ xhci_legacy_init ( xhci ); xhci_legacy_claim ( xhci ); diff --git a/src/drivers/usb/xhci.h b/src/drivers/usb/xhci.h index 83bf71e7e..6e02d70e0 100644 --- a/src/drivers/usb/xhci.h +++ b/src/drivers/usb/xhci.h @@ -243,7 +243,7 @@ enum xhci_default_psi_value { #define XHCI_PORTSC_LWS 0x00010000UL /** Time to delay after writing the port link state */ -#define XHCI_LINK_STATE_DELAY_MS 20 +#define XHCI_LINK_STATE_DELAY_MS 100 /** Connect status change */ #define XHCI_PORTSC_CSC 0x00020000UL @@ -854,6 +854,8 @@ struct xhci_trb_ring { union xhci_trb *trb; /** Length of transfer request blocks */ size_t len; + /** DMA mapping */ + struct dma_mapping map; /** Link TRB (if applicable) */ struct xhci_trb_link *link; @@ -869,8 +871,12 @@ struct xhci_event_ring { unsigned int cons; /** Event ring segment table */ struct xhci_event_ring_segment *segment; + /** Event ring segment table DMA mapping */ + struct dma_mapping segment_map; /** Transfer request blocks */ union xhci_trb *trb; + /** Transfer request blocks DMA mapping */ + struct dma_mapping trb_map; }; /** @@ -1035,10 +1041,34 @@ struct xhci_pch { /** Invalid protocol speed ID values quirk */ #define XHCI_BAD_PSIV 0x0002 +/** Device context base address array */ +struct xhci_dcbaa { + /** Context base addresses */ + uint64_t *context; + /** DMA mapping */ + struct dma_mapping map; +}; + +/** Scratchpad buffer */ +struct xhci_scratchpad { + /** Number of page-sized scratchpad buffers */ + unsigned int count; + /** Scratchpad buffer area */ + userptr_t buffer; + /** Buffer DMA mapping */ + struct dma_mapping buffer_map; + /** Scratchpad array */ + uint64_t *array; + /** Array DMA mapping */ + struct dma_mapping array_map; +}; + /** An xHCI device */ struct xhci_device { /** Registers */ void *regs; + /** DMA device */ + struct dma_device *dma; /** Name */ const char *name; /** Quirks */ @@ -1060,9 +1090,6 @@ struct xhci_device { /** Number of ports */ unsigned int ports; - /** Number of page-sized scratchpad buffers */ - unsigned int scratchpads; - /** 64-bit addressing capability */ int addr64; /** Context size shift */ @@ -1077,12 +1104,10 @@ struct xhci_device { unsigned int legacy; /** Device context base address array */ - uint64_t *dcbaa; + struct xhci_dcbaa dcbaa; - /** Scratchpad buffer area */ - userptr_t scratchpad; - /** Scratchpad buffer array */ - uint64_t *scratchpad_array; + /** Scratchpad buffer */ + struct xhci_scratchpad scratch; /** Command ring */ struct xhci_trb_ring command; @@ -1111,6 +1136,8 @@ struct xhci_slot { unsigned int id; /** Slot context */ struct xhci_slot_context *context; + /** DMA mapping */ + struct dma_mapping map; /** Route string */ unsigned int route; /** Root hub port number */ diff --git a/src/hci/commands/ifmgmt_cmd.c b/src/hci/commands/ifmgmt_cmd.c index c89af2e81..591cb3da8 100644 --- a/src/hci/commands/ifmgmt_cmd.c +++ b/src/hci/commands/ifmgmt_cmd.c @@ -193,6 +193,8 @@ static int ifstat_exec ( int argc, char **argv ) { /** "ifconf" options */ struct ifconf_options { + /** Configuration timeout */ + unsigned long timeout; /** Configurator */ struct net_device_configurator *configurator; }; @@ -202,6 +204,9 @@ static struct option_descriptor ifconf_opts[] 
= { OPTION_DESC ( "configurator", 'c', required_argument, struct ifconf_options, configurator, parse_netdev_configurator ), + OPTION_DESC ( "timeout", 't', required_argument, + struct ifconf_options, timeout, + parse_timeout ), }; /** @@ -216,7 +221,8 @@ static int ifconf_payload ( struct net_device *netdev, int rc; /* Attempt configuration */ - if ( ( rc = ifconf ( netdev, opts->configurator ) ) != 0 ) { + if ( ( rc = ifconf ( netdev, opts->configurator, + opts->timeout ) ) != 0 ) { /* Close device on failure, to avoid memory exhaustion */ netdev_close ( netdev ); @@ -244,6 +250,58 @@ int ifconf_exec ( int argc, char **argv ) { return ifcommon_exec ( argc, argv, &ifconf_cmd ); } +/** "iflinkwait" option list */ +struct iflinkwait_options { + /** Link timeout */ + unsigned long timeout; +}; + +/** "iflinkwait" option list */ +static struct option_descriptor iflinkwait_opts[] = { + OPTION_DESC ( "timeout", 't', required_argument, + struct iflinkwait_options, timeout, parse_timeout ), +}; + +/** + * "iflinkwait" payload + * + * @v netdev Network device + * @v opts Command options + * @ret rc Return status code + */ +static int iflinkwait_payload ( struct net_device *netdev, + struct iflinkwait_options *opts ) { + int rc; + + /* Wait for link-up */ + if ( ( rc = iflinkwait ( netdev, opts->timeout, 1 ) ) != 0 ) { + + /* Close device on failure, to avoid memory exhaustion */ + netdev_close ( netdev ); + + return rc; + } + + return 0; +} + +/** "iflinkwait" command descriptor */ +static struct ifcommon_command_descriptor iflinkwait_cmd = + IFCOMMON_COMMAND_DESC ( struct iflinkwait_options, iflinkwait_opts, + 0, MAX_ARGUMENTS, "[...]", + iflinkwait_payload, 1 ); + +/** + * The "iflinkwait" command + * + * @v argc Argument count + * @v argv Argument list + * @ret rc Return status code + */ +static int iflinkwait_exec ( int argc, char **argv ) { + return ifcommon_exec ( argc, argv, &iflinkwait_cmd ); +} + /** Interface management commands */ struct command ifmgmt_commands[] __command = { { @@ -262,4 +320,8 @@ struct command ifmgmt_commands[] __command = { .name = "ifconf", .exec = ifconf_exec, }, + { + .name = "iflinkwait", + .exec = iflinkwait_exec, + }, }; diff --git a/src/hci/commands/image_archive_cmd.c b/src/hci/commands/image_archive_cmd.c new file mode 100644 index 000000000..a2212aecf --- /dev/null +++ b/src/hci/commands/image_archive_cmd.c @@ -0,0 +1,105 @@ +/* + * Copyright (C) 2021 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
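For reference, the general shape of an interface-management command under this framework, mirroring the iflinkwait hunk above. This is a schematic only: the "ifskeleton" name, its usage string, and the header paths are illustrative assumptions rather than part of this patch; the option descriptor, payload, descriptor and exec structure follow the code above.

    /* Schematic command following the iflinkwait pattern above.
     * All "ifskeleton" names are hypothetical. */
    #include <ipxe/netdevice.h>
    #include <ipxe/parseopt.h>
    #include <hci/ifmgmt_cmd.h>

    /** "ifskeleton" options */
    struct ifskeleton_options {
            /** Timeout */
            unsigned long timeout;
    };

    /** "ifskeleton" option list */
    static struct option_descriptor ifskeleton_opts[] = {
            OPTION_DESC ( "timeout", 't', required_argument,
                          struct ifskeleton_options, timeout, parse_timeout ),
    };

    /** "ifskeleton" payload, called once per selected network device */
    static int ifskeleton_payload ( struct net_device *netdev,
                                    struct ifskeleton_options *opts ) {
            /* ... act on netdev, honouring opts->timeout; close the
             * device and return an error on failure, as the payloads
             * above do ... */
            ( void ) netdev;
            ( void ) opts;
            return 0;
    }

    /** "ifskeleton" command descriptor (trailing arguments mirror the
     * iflinkwait descriptor above) */
    static struct ifcommon_command_descriptor ifskeleton_cmd =
            IFCOMMON_COMMAND_DESC ( struct ifskeleton_options, ifskeleton_opts,
                                    0, MAX_ARGUMENTS, "[<interface>...]",
                                    ifskeleton_payload, 1 );

    /** The "ifskeleton" command */
    static int ifskeleton_exec ( int argc, char **argv ) {
            return ifcommon_exec ( argc, argv, &ifskeleton_cmd );
    }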
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include + +/** @file + * + * Archive image commands + * + */ + +/** "imgextract" options */ +struct imgextract_options { + /** Image name */ + char *name; + /** Keep original image */ + int keep; + /** Download timeout */ + unsigned long timeout; +}; + +/** "imgextract" option list */ +static struct option_descriptor imgextract_opts[] = { + OPTION_DESC ( "name", 'n', required_argument, + struct imgextract_options, name, parse_string ), + OPTION_DESC ( "keep", 'k', no_argument, + struct imgextract_options, keep, parse_flag ), + OPTION_DESC ( "timeout", 't', required_argument, + struct imgextract_options, timeout, parse_timeout ), +}; + +/** "imgextract" command descriptor */ +static struct command_descriptor imgextract_cmd = + COMMAND_DESC ( struct imgextract_options, imgextract_opts, 1, 1, NULL ); + +/** + * The "imgextract" command + * + * @v argc Argument count + * @v argv Argument list + * @ret rc Return status code + */ +static int imgextract_exec ( int argc, char **argv ) { + struct imgextract_options opts; + struct image *image; + int rc; + + /* Parse options */ + if ( ( rc = parse_options ( argc, argv, &imgextract_cmd, + &opts ) ) != 0 ) + goto err_parse; + + /* Acquire image */ + if ( ( rc = imgacquire ( argv[optind], opts.timeout, &image ) ) != 0 ) + goto err_acquire; + + /* Extract archive image */ + if ( ( rc = imgextract ( image, opts.name ) ) != 0 ) + goto err_extract; + + /* Success */ + rc = 0; + + err_extract: + /* Discard original image unless --keep was specified */ + if ( ! opts.keep ) + unregister_image ( image ); + err_acquire: + err_parse: + return rc; +} + +/** Archive image commands */ +struct command image_archive_commands[] __command = { + { + .name = "imgextract", + .exec = imgextract_exec, + }, +}; diff --git a/src/hci/commands/image_mem_cmd.c b/src/hci/commands/image_mem_cmd.c new file mode 100644 index 000000000..c8bfab1ad --- /dev/null +++ b/src/hci/commands/image_mem_cmd.c @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2021 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
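The imgextract command above works with any image type that provides an extract method; the gzip and zlib types added later in this patch are the real examples. As a rough sketch of that interface (the "rawcopy" type and its trivial behaviour are invented for illustration; image_set_len(), image_extract_exec() and the .extract hook are used only in the way the gzip/zlib types below use them):

    #include <errno.h>
    #include <ipxe/uaccess.h>
    #include <ipxe/image.h>

    /** Probe: a real type would check a magic signature here;
     * returning -ENOEXEC lets other image types try */
    static int rawcopy_probe ( struct image *image ) {
            return ( image->len ? 0 : -ENOEXEC );
    }

    /** Extract: presize the output image, then fill it */
    static int rawcopy_extract ( struct image *image,
                                 struct image *extracted ) {
            int rc;

            /* Presize extracted image (here: same length as the input) */
            if ( ( rc = image_set_len ( extracted, image->len ) ) != 0 )
                    return rc;

            /* A real type would decode here; this sketch just copies */
            memcpy_user ( extracted->data, 0, image->data, 0, image->len );

            return 0;
    }

    /** Image type registration */
    struct image_type rawcopy_image_type __image_type ( PROBE_NORMAL ) = {
            .name = "rawcopy",
            .probe = rawcopy_probe,
            .extract = rawcopy_extract,
            .exec = image_extract_exec,
    };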
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** @file + * + * Read memory command + * + */ + +/** "imgmem" options */ +struct imgmem_options { + /** Image name */ + char *name; +}; + +/** "imgmem" option list */ +static struct option_descriptor imgmem_opts[] = { + OPTION_DESC ( "name", 'n', required_argument, + struct imgmem_options, name, parse_string ), +}; + +/** "imgmem" command descriptor */ +static struct command_descriptor imgmem_cmd = + COMMAND_DESC ( struct imgmem_options, imgmem_opts, 2, 2, + "
" ); + +/** + * The "imgmem" command + * + * @v argc Argument count + * @v argv Argument list + * @ret rc Return status code + */ +static int imgmem_exec ( int argc, char **argv ) { + struct imgmem_options opts; + unsigned int data; + unsigned int len; + int rc; + + /* Parse options */ + if ( ( rc = parse_options ( argc, argv, &imgmem_cmd, &opts ) ) != 0 ) + return rc; + + /* Use start address as name if none specified */ + if ( ! opts.name ) + opts.name = argv[optind]; + + /* Parse address */ + if ( ( rc = parse_integer ( argv[optind++], &data ) ) != 0 ) + return rc; + + /* Parse length */ + if ( ( rc = parse_integer ( argv[optind++], &len ) ) != 0 ) + return rc; + + /* Create image */ + if ( ( rc = imgmem ( opts.name, phys_to_user ( data ), len ) ) != 0 ) + return rc; + + return 0; +} + +/** Read memory command */ +struct command imgmem_commands[] __command = { + { + .name = "imgmem", + .exec = imgmem_exec, + }, +}; diff --git a/src/hci/commands/nvo_cmd.c b/src/hci/commands/nvo_cmd.c index ac0d60651..6ad7e7428 100644 --- a/src/hci/commands/nvo_cmd.c +++ b/src/hci/commands/nvo_cmd.c @@ -100,20 +100,40 @@ static int show_exec ( int argc, char **argv ) { } /** "set", "clear", and "read" options */ -struct set_core_options {}; +struct set_core_options { + /** Timeout */ + unsigned long timeout; +}; /** "set", "clear", and "read" option list */ -static struct option_descriptor set_core_opts[] = {}; +static union { + /* "set" takes no options */ + struct option_descriptor set[0]; + /* "clear" takes no options */ + struct option_descriptor clear[0]; + /* "read" takes --timeout option */ + struct option_descriptor read[1]; +} set_core_opts = { + .read = { + OPTION_DESC ( "timeout", 't', required_argument, + struct set_core_options, timeout, parse_timeout ), + }, +}; /** "set" command descriptor */ static struct command_descriptor set_cmd = - COMMAND_DESC ( struct set_core_options, set_core_opts, 1, MAX_ARGUMENTS, - " " ); + COMMAND_DESC ( struct set_core_options, set_core_opts.set, + 1, MAX_ARGUMENTS, " " ); -/** "clear" and "read" command descriptor */ -static struct command_descriptor clear_read_cmd = - COMMAND_DESC ( struct set_core_options, set_core_opts, 1, 1, - "" ); +/** "clear" command descriptor */ +static struct command_descriptor clear_cmd = + COMMAND_DESC ( struct set_core_options, set_core_opts.clear, + 1, 1, "" ); + +/** "read" command descriptor */ +static struct command_descriptor read_cmd = + COMMAND_DESC ( struct set_core_options, set_core_opts.read, + 1, 1, "" ); /** * "set", "clear", and "read" command @@ -127,6 +147,7 @@ static struct command_descriptor clear_read_cmd = static int set_core_exec ( int argc, char **argv, struct command_descriptor *cmd, int ( * get_value ) ( struct named_setting *setting, + struct set_core_options *opts, char **args, char **value ) ) { struct set_core_options opts; struct named_setting setting; @@ -143,7 +164,8 @@ static int set_core_exec ( int argc, char **argv, goto err_parse_setting; /* Parse setting value */ - if ( ( rc = get_value ( &setting, &argv[ optind + 1 ], &value ) ) != 0 ) + if ( ( rc = get_value ( &setting, &opts, &argv[ optind + 1 ], + &value ) ) != 0 ) goto err_get_value; /* Apply default type if necessary */ @@ -170,11 +192,13 @@ static int set_core_exec ( int argc, char **argv, * Get setting value for "set" command * * @v setting Named setting + * @v opts Options list * @v args Remaining arguments * @ret value Setting value * @ret rc Return status code */ static int set_value ( struct named_setting *setting __unused, + struct 
set_core_options *opts __unused, char **args, char **value ) { *value = concat_args ( args ); @@ -200,10 +224,12 @@ static int set_exec ( int argc, char **argv ) { * * @v setting Named setting * @v args Remaining arguments + * @v opts Options list * @ret value Setting value * @ret rc Return status code */ static int clear_value ( struct named_setting *setting __unused, + struct set_core_options *opts __unused, char **args __unused, char **value ) { *value = NULL; @@ -218,7 +244,7 @@ static int clear_value ( struct named_setting *setting __unused, * @ret rc Return status code */ static int clear_exec ( int argc, char **argv ) { - return set_core_exec ( argc, argv, &clear_read_cmd, clear_value ); + return set_core_exec ( argc, argv, &clear_cmd, clear_value ); } /** @@ -226,11 +252,13 @@ static int clear_exec ( int argc, char **argv ) { * * @v setting Named setting * @v args Remaining arguments + * @v opts Options list * @ret value Setting value * @ret rc Return status code */ -static int read_value ( struct named_setting *setting, char **args __unused, - char **value ) { +static int read_value ( struct named_setting *setting, + struct set_core_options *opts, + char **args __unused, char **value ) { char *existing; int rc; @@ -241,7 +269,8 @@ static int read_value ( struct named_setting *setting, char **args __unused, NULL, &setting->setting, &existing ); /* Read new value */ - if ( ( rc = readline_history ( NULL, existing, NULL, value ) ) != 0 ) + if ( ( rc = readline_history ( NULL, existing, NULL, opts->timeout, + value ) ) != 0 ) goto err_readline; err_readline: @@ -257,7 +286,7 @@ static int read_value ( struct named_setting *setting, char **args __unused, * @ret rc Return status code */ static int read_exec ( int argc, char **argv ) { - return set_core_exec ( argc, argv, &clear_read_cmd, read_value ); + return set_core_exec ( argc, argv, &read_cmd, read_value ); } /** "inc" options */ diff --git a/src/hci/linux_args.c b/src/hci/linux_args.c index 5f903e3b6..12020bd0b 100644 --- a/src/hci/linux_args.c +++ b/src/hci/linux_args.c @@ -18,7 +18,6 @@ FILE_LICENCE(GPL2_OR_LATER); -#include #include #include #include @@ -27,21 +26,8 @@ FILE_LICENCE(GPL2_OR_LATER); #include #include -/** Saved argc */ -static int saved_argc = 0; -/** Saved argv */ -static char ** saved_argv; - -/** - * Save argc and argv for later access. - * - * To be called by linuxprefix - */ -__asmcall void save_args(int argc, char **argv) -{ - saved_argc = argc; - saved_argv = argv; -} +int linux_argc; +char **linux_argv; /** Supported command-line options */ static struct option options[] = { @@ -138,7 +124,7 @@ void linux_args_parse() while (1) { int option_index = 0; - c = getopt_long(saved_argc, saved_argv, "", options, &option_index); + c = getopt_long(linux_argc, linux_argv, "", options, &option_index); if (c == -1) break; diff --git a/src/hci/readline.c b/src/hci/readline.c index 83a2e0b90..852c4503a 100644 --- a/src/hci/readline.c +++ b/src/hci/readline.c @@ -248,6 +248,7 @@ void history_free ( struct readline_history *history ) { * @v prompt Prompt string * @v prefill Prefill string, or NULL for no prefill * @v history History buffer, or NULL for no history + * @v timeout Timeout period, in ticks (0=indefinite) * @ret line Line read from console (excluding terminating newline) * @ret rc Return status code * @@ -255,7 +256,8 @@ void history_free ( struct readline_history *history ) { * eventually call free() to release the storage. 
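Callers of readline_history() now pass a timeout in timer ticks (0 keeps the old, indefinite behaviour), and a timed-out prompt surfaces as -ETIMEDOUT, as the readline.c changes around this point show. A rough usage sketch; the helper name and the fall-back-to-default behaviour are this example's own assumptions:

    #include <errno.h>
    #include <string.h>
    #include <readline/readline.h>
    #include <ipxe/timer.h>

    /* Prompt for a value, giving the user roughly ten seconds */
    static char * prompt_with_default ( const char *prompt,
                                        const char *def ) {
            char *line;
            int rc;

            rc = readline_history ( prompt, def, NULL,
                                    ( 10 * TICKS_PER_SEC ), &line );
            if ( rc == -ETIMEDOUT ) {
                    /* Timed out: fall back to the prefill value */
                    return strdup ( def );
            }

            /* May be NULL if the line could not be read */
            return line;
    }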
*/ int readline_history ( const char *prompt, const char *prefill, - struct readline_history *history, char **line ) { + struct readline_history *history, unsigned long timeout, + char **line ) { char buf[READLINE_MAX]; struct edit_string string; int key; @@ -285,8 +287,17 @@ int readline_history ( const char *prompt, const char *prefill, } while ( 1 ) { + + /* Get keypress */ + key = getkey ( timeout ); + if ( key < 0 ) { + rc = -ETIMEDOUT; + goto done; + } + timeout = 0; + /* Handle keypress */ - key = edit_string ( &string, getkey ( 0 ) ); + key = edit_string ( &string, key ); sync_console ( &string ); move_by = 0; switch ( key ) { @@ -342,6 +353,6 @@ int readline_history ( const char *prompt, const char *prefill, char * readline ( const char *prompt ) { char *line; - readline_history ( prompt, NULL, NULL, &line ); + readline_history ( prompt, NULL, NULL, 0, &line ); return line; } diff --git a/src/hci/shell.c b/src/hci/shell.c index 276eb3527..8ecf73a6f 100644 --- a/src/hci/shell.c +++ b/src/hci/shell.c @@ -91,7 +91,7 @@ int shell ( void ) { /* Read and execute commands */ do { - readline_history ( shell_prompt, NULL, &history, &line ); + readline_history ( shell_prompt, NULL, &history, 0, &line ); if ( line ) { rc = system ( line ); free ( line ); diff --git a/src/image/efi_image.c b/src/image/efi_image.c index 47580c0db..3c98decbf 100644 --- a/src/image/efi_image.c +++ b/src/image/efi_image.c @@ -26,10 +26,11 @@ FILE_LICENCE ( GPL2_OR_LATER ); #include #include #include -#include +#include #include #include #include +#include #include #include #include @@ -74,7 +75,7 @@ efi_image_path ( struct image *image, EFI_DEVICE_PATH_PROTOCOL *parent ) { size_t len; /* Calculate device path lengths */ - prefix_len = efi_devpath_len ( parent ); + prefix_len = efi_path_len ( parent ); name_len = strlen ( image->name ); filepath_len = ( SIZE_OF_FILEPATH_DEVICE_PATH + ( name_len + 1 /* NUL */ ) * sizeof ( wchar_t ) ); @@ -140,6 +141,7 @@ static int efi_image_exec ( struct image *image ) { void *interface; } loaded; EFI_HANDLE handle; + EFI_MEMORY_TYPE type; wchar_t *cmdline; EFI_STATUS efirc; int rc; @@ -193,6 +195,7 @@ static int efi_image_exec ( struct image *image ) { } /* Attempt loading image */ + handle = NULL; if ( ( efirc = bs->LoadImage ( FALSE, efi_image_handle, path, user_to_virt ( image->data, 0 ), image->len, &handle ) ) != 0 ) { @@ -200,7 +203,11 @@ static int efi_image_exec ( struct image *image ) { rc = -EEFI_LOAD ( efirc ); DBGC ( image, "EFIIMAGE %p could not load: %s\n", image, strerror ( rc ) ); - goto err_load_image; + if ( efirc == EFI_SECURITY_VIOLATION ) { + goto err_load_image_security_violation; + } else { + goto err_load_image; + } } /* Get the loaded image protocol for the newly loaded image */ @@ -226,6 +233,9 @@ static int efi_image_exec ( struct image *image ) { assert ( loaded.image->LoadOptionsSize == 0 ); assert ( loaded.image->LoadOptions == NULL ); + /* Record image code type */ + type = loaded.image->ImageCodeType; + /* Set command line */ loaded.image->LoadOptions = cmdline; loaded.image->LoadOptionsSize = @@ -248,6 +258,12 @@ static int efi_image_exec ( struct image *image ) { goto err_start_image; } + /* If image was a driver, connect it up to anything available */ + if ( type == EfiBootServicesCode ) { + DBGC ( image, "EFIIMAGE %p connecting drivers\n", image ); + efi_driver_reconnect_all(); + } + /* Success */ rc = 0; @@ -268,6 +284,7 @@ static int efi_image_exec ( struct image *image ) { * call UnloadImage()). 
We therefore ignore any failures from * the UnloadImage() call itself. */ + err_load_image_security_violation: if ( rc != 0 ) bs->UnloadImage ( handle ); err_load_image: @@ -303,6 +320,7 @@ static int efi_image_probe ( struct image *image ) { int rc; /* Attempt loading image */ + handle = NULL; if ( ( efirc = bs->LoadImage ( FALSE, efi_image_handle, &empty_path, user_to_virt ( image->data, 0 ), image->len, &handle ) ) != 0 ) { @@ -310,7 +328,11 @@ static int efi_image_probe ( struct image *image ) { rc = -EEFI_LOAD ( efirc ); DBGC ( image, "EFIIMAGE %p could not load: %s\n", image, strerror ( rc ) ); - return rc; + if ( efirc == EFI_SECURITY_VIOLATION ) { + goto err_load_image_security_violation; + } else { + goto err_load_image; + } } /* Unload the image. We can't leave it loaded, because we @@ -319,6 +341,11 @@ static int efi_image_probe ( struct image *image ) { bs->UnloadImage ( handle ); return 0; + + err_load_image_security_violation: + bs->UnloadImage ( handle ); + err_load_image: + return rc; } /** EFI image type */ diff --git a/src/image/embedded.c b/src/image/embedded.c index 376e5d299..3c4bee655 100644 --- a/src/image/embedded.c +++ b/src/image/embedded.c @@ -83,6 +83,9 @@ static void embedded_init ( void ) { image->name, strerror ( rc ) ); return; } + + /* Trust the selected image implicitly */ + image_trust ( image ); } /** Embedded image initialisation function */ diff --git a/src/image/gzip.c b/src/image/gzip.c new file mode 100644 index 000000000..98376e113 --- /dev/null +++ b/src/image/gzip.c @@ -0,0 +1,167 @@ +/* + * Copyright (C) 2021 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
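The reason EFI_SECURITY_VIOLATION gets its own error label in the efi_image.c hunks above: LoadImage() can refuse an image on platform-policy grounds and yet still create a loaded-image handle, so that handle must still be passed to UnloadImage(), whereas other load failures leave no handle behind (hence also the explicit handle = NULL initialisations). A condensed sketch of that convention, reusing helpers visible in efi_image.c above; illustrative only, not a drop-in function:

    #include <errno.h>
    #include <ipxe/uaccess.h>
    #include <ipxe/image.h>
    #include <ipxe/efi/efi.h>

    static int efi_load_sketch ( struct image *image,
                                 EFI_DEVICE_PATH_PROTOCOL *path ) {
            EFI_BOOT_SERVICES *bs = efi_systab->BootServices;
            EFI_HANDLE handle = NULL;
            EFI_STATUS efirc;

            if ( ( efirc = bs->LoadImage ( FALSE, efi_image_handle, path,
                                           user_to_virt ( image->data, 0 ),
                                           image->len, &handle ) ) != 0 ) {
                    if ( efirc == EFI_SECURITY_VIOLATION ) {
                            /* Load refused by policy, but the handle
                             * exists and must still be unloaded */
                            bs->UnloadImage ( handle );
                    }
                    return -EEFI ( efirc );
            }

            /* ... start or inspect the image ... */

            bs->UnloadImage ( handle );
            return 0;
    }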
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * gzip compressed images + * + */ + +/** + * Extract gzip image + * + * @v image Image + * @v extracted Extracted image + * @ret rc Return status code + */ +static int gzip_extract ( struct image *image, struct image *extracted ) { + struct gzip_header header; + struct gzip_extra_header extra; + struct gzip_crc_header crc; + struct gzip_footer footer; + struct deflate_chunk in; + unsigned int strings; + size_t offset; + size_t len; + off_t nul; + int rc; + + /* Sanity check */ + assert ( image->len >= ( sizeof ( header ) + sizeof ( footer ) ) ); + + /* Extract footer */ + len = ( image->len - sizeof ( footer ) ); + copy_from_user ( &footer, image->data, len, sizeof ( footer ) ); + + /* Extract fixed header */ + copy_from_user ( &header, image->data, 0, sizeof ( header ) ); + offset = sizeof ( header ); + assert ( offset <= ( image->len - sizeof ( footer ) ) ); + + /* Skip extra header, if present */ + if ( header.flags & GZIP_FL_EXTRA ) { + copy_from_user ( &extra, image->data, offset, + sizeof ( extra ) ); + offset += sizeof ( extra ); + offset += le16_to_cpu ( extra.len ); + if ( offset > len ) { + DBGC ( image, "GZIP %p overlength extra header\n", + image ); + return -EINVAL; + } + } + assert ( offset <= ( image->len - sizeof ( footer ) ) ); + + /* Skip name and/or comment, if present */ + strings = 0; + if ( header.flags & GZIP_FL_NAME ) + strings++; + if ( header.flags & GZIP_FL_COMMENT ) + strings++; + while ( strings-- ) { + nul = memchr_user ( image->data, offset, 0, ( len - offset ) ); + if ( nul < 0 ) { + DBGC ( image, "GZIP %p overlength name/comment\n", + image ); + return -EINVAL; + } + offset = ( nul + 1 /* NUL */ ); + } + assert ( offset <= ( image->len - sizeof ( footer ) ) ); + + /* Skip CRC, if present */ + if ( header.flags & GZIP_FL_HCRC ) { + offset += sizeof ( crc ); + if ( offset > len ) { + DBGC ( image, "GZIP %p overlength CRC header\n", + image ); + return -EINVAL; + } + } + + /* Initialise input chunk */ + deflate_chunk_init ( &in, userptr_add ( image->data, offset ), 0, len ); + + /* Presize extracted image */ + if ( ( rc = image_set_len ( extracted, + le32_to_cpu ( footer.len ) ) ) != 0 ) { + DBGC ( image, "GZIP %p could not presize: %s\n", + image, strerror ( rc ) ); + return rc; + } + + /* Decompress image (expanding if necessary) */ + if ( ( rc = zlib_deflate ( DEFLATE_RAW, &in, extracted ) ) != 0 ) { + DBGC ( image, "GZIP %p could not decompress: %s\n", + image, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Probe gzip image + * + * @v image gzip image + * @ret rc Return status code + */ +static int gzip_probe ( struct image *image ) { + struct gzip_header header; + struct gzip_footer footer; + + /* Sanity check */ + if ( image->len < ( sizeof ( header ) + sizeof ( footer ) ) ) { + DBGC ( image, "GZIP %p image too short\n", image ); + return -ENOEXEC; + } + + /* Check magic header */ + copy_from_user ( &header.magic, image->data, 0, + sizeof ( header.magic ) ); + if ( header.magic != cpu_to_be16 ( GZIP_MAGIC ) ) { + DBGC ( image, "GZIP %p invalid magic\n", image ); + return -ENOEXEC; + } + + return 0; +} + +/** gzip image type */ +struct image_type gzip_image_type __image_type ( PROBE_NORMAL ) = { + .name = "gzip", + .probe = gzip_probe, + .extract = gzip_extract, + .exec = image_extract_exec, +}; diff --git a/src/image/png.c b/src/image/png.c index 5c4bcb3a0..d5cf7fd8f 100644 --- 
a/src/image/png.c +++ b/src/image/png.c @@ -924,9 +924,9 @@ static int png_pixbuf ( struct image *image, struct pixel_buffer **pixbuf ) { /* Extract chunk header */ remaining = ( image->len - png->offset ); - if ( remaining < sizeof ( header ) ) { - DBGC ( image, "PNG %s truncated chunk header at offset " - "%zd\n", image->name, png->offset ); + if ( remaining < ( sizeof ( header ) + sizeof ( footer ) ) ) { + DBGC ( image, "PNG %s truncated chunk header/footer " + "at offset %zd\n", image->name, png->offset ); rc = -EINVAL; goto err_truncated; } @@ -936,10 +936,10 @@ static int png_pixbuf ( struct image *image, struct pixel_buffer **pixbuf ) { /* Validate chunk length */ chunk_len = ntohl ( header.len ); - if ( remaining < ( sizeof ( header ) + chunk_len + + if ( chunk_len > ( remaining - sizeof ( header ) - sizeof ( footer ) ) ) { - DBGC ( image, "PNG %s truncated chunk data/footer at " - "offset %zd\n", image->name, png->offset ); + DBGC ( image, "PNG %s truncated chunk data at offset " + "%zd\n", image->name, png->offset ); rc = -EINVAL; goto err_truncated; } diff --git a/src/image/zlib.c b/src/image/zlib.c new file mode 100644 index 000000000..a42c47e1b --- /dev/null +++ b/src/image/zlib.c @@ -0,0 +1,163 @@ +/* + * Copyright (C) 2021 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * zlib compressed images + * + */ + +/** + * Extract compressed data to image + * + * @v format Compression format code + * @v in Compressed input chunk + * @v extracted Extracted image + * @ret rc Return status code + */ +int zlib_deflate ( enum deflate_format format, struct deflate_chunk *in, + struct image *extracted ) { + struct deflate *deflate; + struct deflate_chunk out; + int rc; + + /* Allocate and initialise decompressor */ + deflate = zalloc ( sizeof ( *deflate ) ); + if ( ! deflate ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Decompress data, (re)allocating if necessary */ + while ( 1 ) { + + /* (Re)initialise decompressor */ + deflate_init ( deflate, format ); + + /* (Re)initialise input chunk */ + in->offset = 0; + + /* Initialise output chunk */ + deflate_chunk_init ( &out, extracted->data, 0, extracted->len ); + + /* Decompress data */ + if ( ( rc = deflate_inflate ( deflate, in, &out ) ) != 0 ) { + DBGC ( extracted, "ZLIB %p could not decompress: %s\n", + extracted, strerror ( rc ) ); + goto err_inflate; + } + + /* Check that decompression is valid */ + if ( ! 
deflate_finished ( deflate ) ) { + DBGC ( extracted, "ZLIB %p decompression incomplete\n", + extracted ); + rc = -EINVAL; + goto err_unfinished; + } + + /* Finish if output image size was correct */ + if ( out.offset == extracted->len ) + break; + + /* Otherwise, resize output image and retry */ + if ( ( rc = image_set_len ( extracted, out.offset ) ) != 0 ) { + DBGC ( extracted, "ZLIB %p could not resize: %s\n", + extracted, strerror ( rc ) ); + goto err_set_size; + } + } + + /* Success */ + rc = 0; + + err_set_size: + err_unfinished: + err_inflate: + free ( deflate ); + err_alloc: + return rc; +} + +/** + * Extract zlib image + * + * @v image Image + * @v extracted Extracted image + * @ret rc Return status code + */ +static int zlib_extract ( struct image *image, struct image *extracted ) { + struct deflate_chunk in; + int rc; + + /* Initialise input chunk */ + deflate_chunk_init ( &in, image->data, 0, image->len ); + + /* Decompress image */ + if ( ( rc = zlib_deflate ( DEFLATE_ZLIB, &in, extracted ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Probe zlib image + * + * @v image zlib image + * @ret rc Return status code + */ +static int zlib_probe ( struct image *image ) { + union zlib_magic magic; + + /* Sanity check */ + if ( image->len < sizeof ( magic ) ) { + DBGC ( image, "ZLIB %p image too short\n", image ); + return -ENOEXEC; + } + + /* Check magic header */ + copy_from_user ( &magic, image->data, 0, sizeof ( magic ) ); + if ( ! zlib_magic_is_valid ( &magic ) ) { + DBGC ( image, "ZLIB %p invalid magic data\n", image ); + return -ENOEXEC; + } + + return 0; +} + +/** zlib image type */ +struct image_type zlib_image_type __image_type ( PROBE_NORMAL ) = { + .name = "zlib", + .probe = zlib_probe, + .extract = zlib_extract, + .exec = image_extract_exec, +}; diff --git a/src/include/endian.h b/src/include/endian.h index 79c3163ee..bdae9de45 100644 --- a/src/include/endian.h +++ b/src/include/endian.h @@ -8,14 +8,18 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); * Little-endian systems should define BYTE_ORDER as LITTLE_ENDIAN. * This constant is intended to be used only at compile time. */ +#ifndef __LITTLE_ENDIAN #define __LITTLE_ENDIAN 0x44332211UL +#endif /** Constant representing big-endian byte order * * Big-endian systems should define BYTE_ORDER as BIG_ENDIAN. * This constant is intended to be used only at compile time. */ +#ifndef __BIG_ENDIAN #define __BIG_ENDIAN 0x11223344UL +#endif #include "bits/endian.h" diff --git a/src/include/errno.h b/src/include/errno.h index 342384fa4..decde38ed 100644 --- a/src/include/errno.h +++ b/src/include/errno.h @@ -259,7 +259,7 @@ static inline void eplatform_discard ( int dummy __unused, ... ) {} */ #define __einfo_error( einfo ) ( { \ __asm__ ( ".section \".einfo\", \"\", " PROGBITS_OPS "\n\t" \ - ".align 8\n\t" \ + ".balign 8\n\t" \ "\n1:\n\t" \ ".long ( 4f - 1b )\n\t" \ ".long %c0\n\t" \ @@ -268,7 +268,7 @@ static inline void eplatform_discard ( int dummy __unused, ... 
) {} ".long %c1\n\t" \ "\n2:\t.asciz \"" __einfo_desc ( einfo ) "\"\n\t" \ "\n3:\t.asciz \"" __FILE__ "\"\n\t" \ - ".align 8\n\t" \ + ".balign 8\n\t" \ "\n4:\n\t" \ ".previous\n\t" : : \ "i" ( __einfo_errno ( einfo ) ), \ diff --git a/src/include/ipxe/acpi.h b/src/include/ipxe/acpi.h index 78f402530..81ef7ff76 100644 --- a/src/include/ipxe/acpi.h +++ b/src/include/ipxe/acpi.h @@ -19,6 +19,141 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include +/** An ACPI small resource descriptor header */ +struct acpi_small_resource { + /** Tag byte */ + uint8_t tag; +} __attribute__ (( packed )); + +/** ACPI small resource length mask */ +#define ACPI_SMALL_LEN_MASK 0x03 + +/** An ACPI end resource descriptor */ +#define ACPI_END_RESOURCE 0x78 + +/** An ACPI end resource descriptor */ +struct acpi_end_resource { + /** Header */ + struct acpi_small_resource hdr; + /** Checksum */ + uint8_t checksum; +} __attribute__ (( packed )); + +/** An ACPI large resource descriptor header */ +struct acpi_large_resource { + /** Tag byte */ + uint8_t tag; + /** Length of data items */ + uint16_t len; +} __attribute__ (( packed )); + +/** ACPI large resource flag */ +#define ACPI_LARGE 0x80 + +/** An ACPI QWORD address space resource descriptor */ +#define ACPI_QWORD_ADDRESS_SPACE_RESOURCE 0x8a + +/** An ACPI QWORD address space resource descriptor */ +struct acpi_qword_address_space_resource { + /** Header */ + struct acpi_large_resource hdr; + /** Resource type */ + uint8_t type; + /** General flags */ + uint8_t general; + /** Type-specific flags */ + uint8_t specific; + /** Granularity */ + uint64_t granularity; + /** Minimum address */ + uint64_t min; + /** Maximum address */ + uint64_t max; + /** Translation offset */ + uint64_t offset; + /** Length */ + uint64_t len; +} __attribute__ (( packed )); + +/** A memory address space type */ +#define ACPI_ADDRESS_TYPE_MEM 0x00 + +/** A bus number address space type */ +#define ACPI_ADDRESS_TYPE_BUS 0x02 + +/** An ACPI resource descriptor */ +union acpi_resource { + /** Tag byte */ + uint8_t tag; + /** Small resource descriptor */ + struct acpi_small_resource small; + /** End resource descriptor */ + struct acpi_end_resource end; + /** Large resource descriptor */ + struct acpi_large_resource large; + /** QWORD address space resource descriptor */ + struct acpi_qword_address_space_resource qword; +}; + +/** + * Get ACPI resource tag + * + * @v res ACPI resource descriptor + * @ret tag Resource tag + */ +static inline unsigned int acpi_resource_tag ( union acpi_resource *res ) { + + return ( ( res->tag & ACPI_LARGE ) ? + res->tag : ( res->tag & ~ACPI_SMALL_LEN_MASK ) ); +} + +/** + * Get length of ACPI small resource descriptor + * + * @v res Small resource descriptor + * @ret len Length of descriptor + */ +static inline size_t acpi_small_len ( struct acpi_small_resource *res ) { + + return ( sizeof ( *res ) + ( res->tag & ACPI_SMALL_LEN_MASK ) ); +} + +/** + * Get length of ACPI large resource descriptor + * + * @v res Large resource descriptor + * @ret len Length of descriptor + */ +static inline size_t acpi_large_len ( struct acpi_large_resource *res ) { + + return ( sizeof ( *res ) + le16_to_cpu ( res->len ) ); +} + +/** + * Get length of ACPI resource descriptor + * + * @v res ACPI resource descriptor + * @ret len Length of descriptor + */ +static inline size_t acpi_resource_len ( union acpi_resource *res ) { + + return ( ( res->tag & ACPI_LARGE ) ? 
+ acpi_large_len ( &res->large ) : + acpi_small_len ( &res->small ) ); +} + +/** + * Get next ACPI resource descriptor + * + * @v res ACPI resource descriptor + * @ret next Next ACPI resource descriptor + */ +static inline union acpi_resource * +acpi_resource_next ( union acpi_resource *res ) { + + return ( ( ( void * ) res ) + acpi_resource_len ( res ) ); +} + /** * An ACPI description header * @@ -220,9 +355,12 @@ struct acpi_model { #define PROVIDE_ACPI_INLINE( _subsys, _api_func ) \ PROVIDE_SINGLE_API_INLINE ( ACPI_PREFIX_ ## _subsys, _api_func ) +extern userptr_t acpi_find_via_rsdt ( uint32_t signature, unsigned int index ); + /* Include all architecture-independent ACPI API headers */ #include #include +#include /* Include all architecture-dependent ACPI API headers */ #include @@ -234,13 +372,21 @@ struct acpi_model { */ userptr_t acpi_find_rsdt ( void ); +/** + * Locate ACPI table + * + * @v signature Requested table signature + * @v index Requested index of table with this signature + * @ret table Table, or UNULL if not found + */ +userptr_t acpi_find ( uint32_t signature, unsigned int index ); + extern struct acpi_descriptor * acpi_describe ( struct interface *interface ); #define acpi_describe_TYPE( object_type ) \ typeof ( struct acpi_descriptor * ( object_type ) ) extern void acpi_fix_checksum ( struct acpi_header *acpi ); -extern userptr_t acpi_find ( uint32_t signature, unsigned int index ); extern int acpi_sx ( uint32_t signature ); extern void acpi_add ( struct acpi_descriptor *desc ); extern void acpi_del ( struct acpi_descriptor *desc ); diff --git a/src/include/ipxe/aoe.h b/src/include/ipxe/aoe.h index a51044d15..14d11c5cb 100644 --- a/src/include/ipxe/aoe.h +++ b/src/include/ipxe/aoe.h @@ -15,6 +15,8 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include +#include /** An AoE config command */ struct aoecfg { @@ -109,6 +111,35 @@ struct aoehdr { /** Maximum number of sectors per packet */ #define AOE_MAX_COUNT 2 +/** An AoE device */ +struct aoe_device { + /** Reference counter */ + struct refcnt refcnt; + + /** Network device */ + struct net_device *netdev; + /** ATA command issuing interface */ + struct interface ata; + + /** Major number */ + uint16_t major; + /** Minor number */ + uint8_t minor; + /** Target MAC address */ + uint8_t target[MAX_LL_ADDR_LEN]; + + /** Saved timeout value */ + unsigned long timeout; + + /** Configuration command interface */ + struct interface config; + /** Device is configued */ + int configured; + + /** ACPI descriptor */ + struct acpi_descriptor desc; +}; + /** AoE boot firmware table signature */ #define ABFT_SIG ACPI_SIGNATURE ( 'a', 'B', 'F', 'T' ) diff --git a/src/include/ipxe/asn1.h b/src/include/ipxe/asn1.h index 24caecdc5..fdf06f109 100644 --- a/src/include/ipxe/asn1.h +++ b/src/include/ipxe/asn1.h @@ -11,6 +11,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include +#include #include #include #include @@ -75,6 +76,9 @@ struct asn1_builder_header { /** ASN.1 enumeration */ #define ASN1_ENUMERATED 0x0a +/** ASN.1 UTF-8 string */ +#define ASN1_UTF8_STRING 0x0c + /** ASN.1 UTC time */ #define ASN1_UTC_TIME 0x17 @@ -96,6 +100,10 @@ struct asn1_builder_header { /** ASN.1 "any tag" magic value */ #define ASN1_ANY -1U +/** Construct a short ASN.1 value */ +#define ASN1_SHORT( tag, ... 
) \ + (tag), VA_ARG_COUNT ( __VA_ARGS__ ), __VA_ARGS__ + /** Initial OID byte */ #define ASN1_OID_INITIAL( first, second ) ( ( (first) * 40 ) + (second) ) @@ -288,10 +296,10 @@ struct asn1_builder_header { ASN1_OID_INITIAL ( 2, 5 ), ASN1_OID_SINGLE ( 29 ), \ ASN1_OID_SINGLE ( 17 ) -/** Define an ASN.1 cursor containing an OID */ -#define ASN1_OID_CURSOR( oid_value ) { \ - .data = oid_value, \ - .len = sizeof ( oid_value ), \ +/** Define an ASN.1 cursor for a static value */ +#define ASN1_CURSOR( value ) { \ + .data = value, \ + .len = sizeof ( value ), \ } /** An ASN.1 OID-identified algorithm */ @@ -312,6 +320,29 @@ struct asn1_algorithm { /** Declare an ASN.1 OID-identified algorithm */ #define __asn1_algorithm __table_entry ( ASN1_ALGORITHMS, 01 ) +/* ASN.1 OID-identified algorithms */ +extern struct asn1_algorithm rsa_encryption_algorithm __asn1_algorithm; +extern struct asn1_algorithm md5_with_rsa_encryption_algorithm __asn1_algorithm; +extern struct asn1_algorithm +sha1_with_rsa_encryption_algorithm __asn1_algorithm; +extern struct asn1_algorithm +sha256_with_rsa_encryption_algorithm __asn1_algorithm; +extern struct asn1_algorithm +sha384_with_rsa_encryption_algorithm __asn1_algorithm; +extern struct asn1_algorithm +sha512_with_rsa_encryption_algorithm __asn1_algorithm; +extern struct asn1_algorithm +sha224_with_rsa_encryption_algorithm __asn1_algorithm; +extern struct asn1_algorithm oid_md4_algorithm __asn1_algorithm; +extern struct asn1_algorithm oid_md5_algorithm __asn1_algorithm; +extern struct asn1_algorithm oid_sha1_algorithm __asn1_algorithm; +extern struct asn1_algorithm oid_sha256_algorithm __asn1_algorithm; +extern struct asn1_algorithm oid_sha384_algorithm __asn1_algorithm; +extern struct asn1_algorithm oid_sha512_algorithm __asn1_algorithm; +extern struct asn1_algorithm oid_sha224_algorithm __asn1_algorithm; +extern struct asn1_algorithm oid_sha512_224_algorithm __asn1_algorithm; +extern struct asn1_algorithm oid_sha512_256_algorithm __asn1_algorithm; + /** An ASN.1 bit string */ struct asn1_bit_string { /** Data */ diff --git a/src/include/ipxe/cachedhcp.h b/src/include/ipxe/cachedhcp.h new file mode 100644 index 000000000..39ce74543 --- /dev/null +++ b/src/include/ipxe/cachedhcp.h @@ -0,0 +1,24 @@ +#ifndef _IPXE_CACHEDHCP_H +#define _IPXE_CACHEDHCP_H + +/** @file + * + * Cached DHCP packet + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +struct cached_dhcp_packet; + +extern struct cached_dhcp_packet cached_dhcpack; +extern struct cached_dhcp_packet cached_proxydhcp; +extern struct cached_dhcp_packet cached_pxebs; + +extern int cachedhcp_record ( struct cached_dhcp_packet *cache, userptr_t data, + size_t max_len ); + +#endif /* _IPXE_CACHEDHCP_H */ diff --git a/src/include/ipxe/certstore.h b/src/include/ipxe/certstore.h index e4c789cfd..ce96666cf 100644 --- a/src/include/ipxe/certstore.h +++ b/src/include/ipxe/certstore.h @@ -11,11 +11,12 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include +#include extern struct x509_chain certstore; extern struct x509_certificate * certstore_find ( struct asn1_cursor *raw ); -extern struct x509_certificate * certstore_find_key ( struct asn1_cursor *key ); +extern struct x509_certificate * certstore_find_key ( struct private_key *key ); extern void certstore_add ( struct x509_certificate *cert ); extern void certstore_del ( struct x509_certificate *cert ); diff --git a/src/include/ipxe/cpio.h b/src/include/ipxe/cpio.h index 0637c531d..9c5e22d5a 100644 --- a/src/include/ipxe/cpio.h +++ 
b/src/include/ipxe/cpio.h @@ -9,6 +9,8 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +#include + /** A CPIO archive header * * All field are hexadecimal ASCII numbers padded with '0' on the @@ -48,6 +50,25 @@ struct cpio_header { /** CPIO magic */ #define CPIO_MAGIC "070701" +/** CPIO header length alignment */ +#define CPIO_ALIGN 4 + +/** Alignment for CPIO archives within an initrd */ +#define INITRD_ALIGN 4096 + +/** + * Get CPIO image name + * + * @v image Image + * @ret name Image name (not NUL terminated) + */ +static inline __attribute__ (( always_inline )) const char * +cpio_name ( struct image *image ) { + return image->cmdline; +} + extern void cpio_set_field ( char *field, unsigned long value ); +extern size_t cpio_name_len ( struct image *image ); +extern size_t cpio_header ( struct image *image, struct cpio_header *cpio ); #endif /* _IPXE_CPIO_H */ diff --git a/src/include/ipxe/dhcppkt.h b/src/include/ipxe/dhcppkt.h index f13dfc93d..86075960a 100644 --- a/src/include/ipxe/dhcppkt.h +++ b/src/include/ipxe/dhcppkt.h @@ -56,7 +56,7 @@ dhcppkt_put ( struct dhcp_packet *dhcppkt ) { * @v dhcppkt DHCP packet * @ret len Used length */ -static inline int dhcppkt_len ( struct dhcp_packet *dhcppkt ) { +static inline size_t dhcppkt_len ( struct dhcp_packet *dhcppkt ) { return ( offsetof ( struct dhcphdr, options ) + dhcppkt->options.used_len ); } diff --git a/src/include/ipxe/dma.h b/src/include/ipxe/dma.h new file mode 100644 index 000000000..385e4baf7 --- /dev/null +++ b/src/include/ipxe/dma.h @@ -0,0 +1,480 @@ +#ifndef _IPXE_DMA_H +#define _IPXE_DMA_H + +/** @file + * + * DMA mappings + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include + +#ifdef DMAAPI_OP +#define DMAAPI_PREFIX_op +#else +#define DMAAPI_PREFIX_op __op_ +#endif + +#ifdef DMAAPI_FLAT +#define DMAAPI_PREFIX_flat +#else +#define DMAAPI_PREFIX_flat __flat_ +#endif + +/** A DMA mapping */ +struct dma_mapping { + /** Address offset + * + * This is the value that must be added to a physical address + * within the mapping in order to produce the corresponding + * device-side DMA address. 
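In driver terms, the offset described above is what turns a host physical address into the address the device must be given. A minimal sketch of the streaming-mapping side of this API; the "mydev" descriptor layout is hypothetical, while dma_map(), dma() and dma_unmap() are used as declared later in this header, with DMA_TX as defined below:

    #include <stddef.h>
    #include <stdint.h>
    #include <byteswap.h>
    #include <ipxe/dma.h>

    /** Hypothetical transmit descriptor */
    struct mydev_tx_desc {
            uint64_t addr;  /* device-side DMA address */
            uint32_t len;
    };

    static int mydev_transmit_sketch ( struct dma_device *dmadev, void *data,
                                       size_t len,
                                       struct mydev_tx_desc *desc,
                                       struct dma_mapping *map ) {
            int rc;

            /* Map buffer for reading by the device */
            if ( ( rc = dma_map ( dmadev, map, virt_to_phys ( data ), len,
                                  DMA_TX ) ) != 0 )
                    return rc;

            /* Program the device-side address: physical address plus
             * the mapping offset, obtained via dma() */
            desc->addr = cpu_to_le64 ( dma ( map, data ) );
            desc->len = cpu_to_le32 ( len );

            /* ... notify the device; call dma_unmap ( map ) once the
             * device has finished with the buffer ... */
            return 0;
    }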
+ */ + physaddr_t offset; + /** DMA device (if unmapping is required) */ + struct dma_device *dma; + /** Platform mapping token */ + void *token; +}; + +/** A DMA-capable device */ +struct dma_device { + /** DMA operations */ + struct dma_operations *op; + /** Addressable space mask */ + physaddr_t mask; + /** Total number of mappings (for debugging) */ + unsigned int mapped; + /** Total number of allocations (for debugging) */ + unsigned int allocated; +}; + +/** DMA operations */ +struct dma_operations { + /** + * Map buffer for DMA + * + * @v dma DMA device + * @v map DMA mapping to fill in + * @v addr Buffer address + * @v len Length of buffer + * @v flags Mapping flags + * @ret rc Return status code + */ + int ( * map ) ( struct dma_device *dma, struct dma_mapping *map, + physaddr_t addr, size_t len, int flags ); + /** + * Unmap buffer + * + * @v dma DMA device + * @v map DMA mapping + */ + void ( * unmap ) ( struct dma_device *dma, struct dma_mapping *map ); + /** + * Allocate and map DMA-coherent buffer + * + * @v dma DMA device + * @v map DMA mapping to fill in + * @v len Length of buffer + * @v align Physical alignment + * @ret addr Buffer address, or NULL on error + */ + void * ( * alloc ) ( struct dma_device *dma, struct dma_mapping *map, + size_t len, size_t align ); + /** + * Unmap and free DMA-coherent buffer + * + * @v dma DMA device + * @v map DMA mapping + * @v addr Buffer address + * @v len Length of buffer + */ + void ( * free ) ( struct dma_device *dma, struct dma_mapping *map, + void *addr, size_t len ); + /** + * Allocate and map DMA-coherent buffer from external (user) memory + * + * @v dma DMA device + * @v map DMA mapping to fill in + * @v len Length of buffer + * @v align Physical alignment + * @ret addr Buffer address, or NULL on error + */ + userptr_t ( * umalloc ) ( struct dma_device *dma, + struct dma_mapping *map, + size_t len, size_t align ); + /** + * Unmap and free DMA-coherent buffer from external (user) memory + * + * @v dma DMA device + * @v map DMA mapping + * @v addr Buffer address + * @v len Length of buffer + */ + void ( * ufree ) ( struct dma_device *dma, struct dma_mapping *map, + userptr_t addr, size_t len ); + /** + * Set addressable space mask + * + * @v dma DMA device + * @v mask Addressable space mask + */ + void ( * set_mask ) ( struct dma_device *dma, physaddr_t mask ); +}; + +/** Device will read data from host memory */ +#define DMA_TX 0x01 + +/** Device will write data to host memory */ +#define DMA_RX 0x02 + +/** Device will both read data from and write data to host memory */ +#define DMA_BI ( DMA_TX | DMA_RX ) + +/** + * Calculate static inline DMA I/O API function name + * + * @v _prefix Subsystem prefix + * @v _api_func API function + * @ret _subsys_func Subsystem API function + */ +#define DMAAPI_INLINE( _subsys, _api_func ) \ + SINGLE_API_INLINE ( DMAAPI_PREFIX_ ## _subsys, _api_func ) + +/** + * Provide a DMA I/O API implementation + * + * @v _prefix Subsystem prefix + * @v _api_func API function + * @v _func Implementing function + */ +#define PROVIDE_DMAAPI( _subsys, _api_func, _func ) \ + PROVIDE_SINGLE_API ( DMAAPI_PREFIX_ ## _subsys, _api_func, _func ) + +/** + * Provide a static inline DMA I/O API implementation + * + * @v _prefix Subsystem prefix + * @v _api_func API function + */ +#define PROVIDE_DMAAPI_INLINE( _subsys, _api_func ) \ + PROVIDE_SINGLE_API_INLINE ( DMAAPI_PREFIX_ ## _subsys, _api_func ) + +/** + * Map buffer for DMA + * + * @v dma DMA device + * @v map DMA mapping to fill in + * @v addr Buffer address + * 
@v len Length of buffer + * @v flags Mapping flags + * @ret rc Return status code + */ +static inline __always_inline int +DMAAPI_INLINE ( flat, dma_map ) ( struct dma_device *dma, + struct dma_mapping *map, + physaddr_t addr __unused, + size_t len __unused, int flags __unused ) { + + /* Increment mapping count (for debugging) */ + if ( DBG_LOG ) { + map->dma = dma; + dma->mapped++; + } + + return 0; +} + +/** + * Unmap buffer + * + * @v map DMA mapping + */ +static inline __always_inline void +DMAAPI_INLINE ( flat, dma_unmap ) ( struct dma_mapping *map ) { + + /* Decrement mapping count (for debugging) */ + if ( DBG_LOG ) { + assert ( map->dma != NULL ); + map->dma->mapped--; + map->dma = NULL; + } +} + +/** + * Allocate and map DMA-coherent buffer + * + * @v dma DMA device + * @v map DMA mapping to fill in + * @v len Length of buffer + * @v align Physical alignment + * @ret addr Buffer address, or NULL on error + */ +static inline __always_inline void * +DMAAPI_INLINE ( flat, dma_alloc ) ( struct dma_device *dma, + struct dma_mapping *map, + size_t len, size_t align ) { + void *addr; + + /* Allocate buffer */ + addr = malloc_phys ( len, align ); + + /* Increment mapping count (for debugging) */ + if ( DBG_LOG && addr ) { + map->dma = dma; + dma->mapped++; + } + + return addr; +} + +/** + * Unmap and free DMA-coherent buffer + * + * @v map DMA mapping + * @v addr Buffer address + * @v len Length of buffer + */ +static inline __always_inline void +DMAAPI_INLINE ( flat, dma_free ) ( struct dma_mapping *map, + void *addr, size_t len ) { + + /* Free buffer */ + free_phys ( addr, len ); + + /* Decrement mapping count (for debugging) */ + if ( DBG_LOG ) { + assert ( map->dma != NULL ); + map->dma->mapped--; + map->dma = NULL; + } +} + +/** + * Allocate and map DMA-coherent buffer from external (user) memory + * + * @v dma DMA device + * @v map DMA mapping to fill in + * @v len Length of buffer + * @v align Physical alignment + * @ret addr Buffer address, or NULL on error + */ +static inline __always_inline userptr_t +DMAAPI_INLINE ( flat, dma_umalloc ) ( struct dma_device *dma, + struct dma_mapping *map, + size_t len, size_t align __unused ) { + userptr_t addr; + + /* Allocate buffer */ + addr = umalloc ( len ); + + /* Increment mapping count (for debugging) */ + if ( DBG_LOG && addr ) { + map->dma = dma; + dma->mapped++; + } + + return addr; +} + +/** + * Unmap and free DMA-coherent buffer from external (user) memory + * + * @v map DMA mapping + * @v addr Buffer address + * @v len Length of buffer + */ +static inline __always_inline void +DMAAPI_INLINE ( flat, dma_ufree ) ( struct dma_mapping *map, + userptr_t addr, size_t len __unused ) { + + /* Free buffer */ + ufree ( addr ); + + /* Decrement mapping count (for debugging) */ + if ( DBG_LOG ) { + assert ( map->dma != NULL ); + map->dma->mapped--; + map->dma = NULL; + } +} + +/** + * Set addressable space mask + * + * @v dma DMA device + * @v mask Addressable space mask + */ +static inline __always_inline void +DMAAPI_INLINE ( flat, dma_set_mask ) ( struct dma_device *dma __unused, + physaddr_t mask __unused ) { + + /* Nothing to do */ +} + +/** + * Get DMA address from physical address + * + * @v map DMA mapping + * @v addr Physical address within the mapped region + * @ret addr Device-side DMA address + */ +static inline __always_inline physaddr_t +DMAAPI_INLINE ( flat, dma_phys ) ( struct dma_mapping *map __unused, + physaddr_t addr ) { + + /* Use physical address as device address */ + return addr; +} + +/** + * Get DMA address from 
physical address + * + * @v map DMA mapping + * @v addr Physical address within the mapped region + * @ret addr Device-side DMA address + */ +static inline __always_inline physaddr_t +DMAAPI_INLINE ( op, dma_phys ) ( struct dma_mapping *map, physaddr_t addr ) { + + /* Adjust physical address using mapping offset */ + return ( addr + map->offset ); +} + +/** + * Map buffer for DMA + * + * @v dma DMA device + * @v map DMA mapping to fill in + * @v addr Buffer address + * @v len Length of buffer + * @v flags Mapping flags + * @ret rc Return status code + */ +int dma_map ( struct dma_device *dma, struct dma_mapping *map, + physaddr_t addr, size_t len, int flags ); + +/** + * Unmap buffer + * + * @v map DMA mapping + */ +void dma_unmap ( struct dma_mapping *map ); + +/** + * Allocate and map DMA-coherent buffer + * + * @v dma DMA device + * @v map DMA mapping to fill in + * @v len Length of buffer + * @v align Physical alignment + * @ret addr Buffer address, or NULL on error + */ +void * dma_alloc ( struct dma_device *dma, struct dma_mapping *map, + size_t len, size_t align ); + +/** + * Unmap and free DMA-coherent buffer + * + * @v map DMA mapping + * @v addr Buffer address + * @v len Length of buffer + */ +void dma_free ( struct dma_mapping *map, void *addr, size_t len ); + +/** + * Allocate and map DMA-coherent buffer from external (user) memory + * + * @v dma DMA device + * @v map DMA mapping to fill in + * @v len Length of buffer + * @v align Physical alignment + * @ret addr Buffer address, or NULL on error + */ +userptr_t dma_umalloc ( struct dma_device *dma, struct dma_mapping *map, + size_t len, size_t align ); + +/** + * Unmap and free DMA-coherent buffer from external (user) memory + * + * @v map DMA mapping + * @v addr Buffer address + * @v len Length of buffer + */ +void dma_ufree ( struct dma_mapping *map, userptr_t addr, size_t len ); + +/** + * Set addressable space mask + * + * @v dma DMA device + * @v mask Addressable space mask + */ +void dma_set_mask ( struct dma_device *dma, physaddr_t mask ); + +/** + * Get DMA address from physical address + * + * @v map DMA mapping + * @v addr Physical address within the mapped region + * @ret addr Device-side DMA address + */ +physaddr_t dma_phys ( struct dma_mapping *map, physaddr_t addr ); + +/** + * Get DMA address from virtual address + * + * @v map DMA mapping + * @v addr Virtual address within the mapped region + * @ret addr Device-side DMA address + */ +static inline __always_inline physaddr_t dma ( struct dma_mapping *map, + void *addr ) { + + /* Get DMA address from corresponding physical address */ + return dma_phys ( map, virt_to_phys ( addr ) ); +} + +/** + * Check if DMA unmapping is required + * + * @v map DMA mapping + * @v unmap Unmapping is required + */ +static inline __always_inline int dma_mapped ( struct dma_mapping *map ) { + + /* Unmapping is required if a DMA device was recorded */ + return ( map->dma != NULL ); +} + +/** + * Initialise DMA device + * + * @v dma DMA device + * @v op DMA operations + */ +static inline __always_inline void dma_init ( struct dma_device *dma, + struct dma_operations *op ) { + + /* Set operations table */ + dma->op = op; +} + +/** + * Set 64-bit addressable space mask + * + * @v dma DMA device + */ +static inline __always_inline void +dma_set_mask_64bit ( struct dma_device *dma ) { + + /* Set mask to maximum physical address */ + dma_set_mask ( dma, ~( ( physaddr_t ) 0 ) ); +} + +#endif /* _IPXE_DMA_H */ diff --git a/src/include/ipxe/eap.h b/src/include/ipxe/eap.h new file mode 100644 
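For context, the DMA API declared above is consumed by drivers roughly as in the following minimal, hypothetical sketch (assuming the iPXE build environment): my_nic, my_desc, MY_RING_LEN, my_create_ring and my_destroy_ring are invented for illustration, and the alignment argument to dma_alloc() simply reuses the ring length.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <ipxe/dma.h>

#define MY_RING_LEN 256			/* hypothetical descriptor count */

struct my_desc {
	uint64_t addr;
	uint32_t len;
	uint32_t flags;
};

struct my_nic {
	struct dma_device *dma;		/* e.g. &pci->dma for a PCI NIC */
	struct dma_mapping map;
	struct my_desc *ring;
};

static int my_create_ring ( struct my_nic *nic ) {
	size_t len = ( MY_RING_LEN * sizeof ( nic->ring[0] ) );

	/* Allow the device to address all of host memory */
	dma_set_mask_64bit ( nic->dma );

	/* Allocate and map a coherent descriptor ring */
	nic->ring = dma_alloc ( nic->dma, &nic->map, len, len );
	if ( ! nic->ring )
		return -ENOMEM;

	/* The device must be programmed with the device-side address,
	 * i.e. dma ( &nic->map, nic->ring ), not the physical address.
	 */
	return 0;
}

static void my_destroy_ring ( struct my_nic *nic ) {
	size_t len = ( MY_RING_LEN * sizeof ( nic->ring[0] ) );

	dma_free ( &nic->map, nic->ring, len );
}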
index 000000000..6fe70189b --- /dev/null +++ b/src/include/ipxe/eap.h @@ -0,0 +1,69 @@ +#ifndef _IPXE_EAP_H +#define _IPXE_EAP_H + +/** @file + * + * Extensible Authentication Protocol + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** EAP header */ +struct eap_header { + /** Code */ + uint8_t code; + /** Identifier */ + uint8_t id; + /** Length */ + uint16_t len; +} __attribute__ (( packed )); + +/** EAP request */ +#define EAP_CODE_REQUEST 1 + +/** EAP request */ +struct eap_request { + /** Header */ + struct eap_header hdr; + /** Type */ + uint8_t type; +} __attribute__ (( packed )); + +/** EAP identity */ +#define EAP_TYPE_IDENTITY 1 + +/** EAP success */ +#define EAP_CODE_SUCCESS 3 + +/** EAP failure */ +#define EAP_CODE_FAILURE 4 + +/** EAP packet */ +union eap_packet { + /** Header */ + struct eap_header hdr; + /** Request */ + struct eap_request req; +}; + +/** Link block timeout + * + * We mark the link as blocked upon receiving a Request-Identity, on + * the basis that this most likely indicates that the switch will not + * yet be forwarding packets. + * + * There is no way to tell how frequently the Request-Identity packet + * will be retransmitted by the switch. The default value for Cisco + * switches seems to be 30 seconds, so treat the link as blocked for + * 45 seconds. + */ +#define EAP_BLOCK_TIMEOUT ( 45 * TICKS_PER_SEC ) + +extern int eap_rx ( struct net_device *netdev, const void *data, size_t len ); + +#endif /* _IPXE_EAP_H */ diff --git a/src/include/ipxe/eapol.h b/src/include/ipxe/eapol.h index 5ca9c2815..952d6c752 100644 --- a/src/include/ipxe/eapol.h +++ b/src/include/ipxe/eapol.h @@ -1,114 +1,61 @@ -/* - * Copyright (c) 2009 Joshua Oreman . - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - */ - #ifndef _IPXE_EAPOL_H #define _IPXE_EAPOL_H /** @file * - * Definitions for EAPOL (Extensible Authentication Protocol over - * LANs) frames. Definitions for the packets usually encapsulated in - * them are elsewhere. + * Extensible Authentication Protocol over LAN (EAPoL) + * */ -#include +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + #include +#include +#include -FILE_LICENCE ( GPL2_OR_LATER ); - - -/** - * @defgroup eapol_type EAPOL archetype identifiers - * @{ - */ -#define EAPOL_TYPE_EAP 0 /**< EAP authentication handshake packet */ -#define EAPOL_TYPE_START 1 /**< Request by Peer to begin (no data) */ -#define EAPOL_TYPE_LOGOFF 2 /**< Request by Peer to terminate (no data) */ -#define EAPOL_TYPE_KEY 3 /**< EAPOL-Key packet */ -/** @} */ - -/** Expected EAPOL version field value - * - * Version 2 is often seen and has no format differences from version 1; - * however, many older APs will completely drop version-2 packets, so - * we advertise ourselves as version 1. 
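For illustration, the EAP definitions above are used for the kind of length and code validation sketched below. This is hypothetical (eap_parse is an invented name, not the real eap_rx() implementation) and assumes the iPXE build environment.

#include <stddef.h>
#include <errno.h>
#include <byteswap.h>
#include <ipxe/eap.h>

static int eap_parse ( const void *data, size_t len ) {
	const union eap_packet *eap = data;

	/* Check lengths against both the received data and the header */
	if ( len < sizeof ( eap->hdr ) )
		return -EINVAL;
	if ( ntohs ( eap->hdr.len ) > len )
		return -EINVAL;

	switch ( eap->hdr.code ) {
	case EAP_CODE_REQUEST:
		if ( len < sizeof ( eap->req ) )
			return -EINVAL;
		/* A Request-Identity is taken as a hint that the switch
		 * port is not yet forwarding; the receiver blocks the
		 * link for EAP_BLOCK_TIMEOUT.
		 */
		return ( ( eap->req.type == EAP_TYPE_IDENTITY ) ?
			 0 : -ENOTSUP );
	case EAP_CODE_SUCCESS:
	case EAP_CODE_FAILURE:
		return 0;
	default:
		return -ENOTSUP;
	}
}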
- */ -#define EAPOL_THIS_VERSION 1 - -/** Length of an EAPOL frame header */ -#define EAPOL_HDR_LEN 4 - -/** An EAPOL frame - * - * This may encapsulate an eap_pkt, an eapol_key_pkt, or a Start or - * Logoff request with no data attached. It is transmitted directly in - * an Ethernet frame, with no IP packet header. - */ -struct eapol_frame -{ - /** EAPOL version identifier, always 1 */ - u8 version; - - /** EAPOL archetype identifier indicating format of payload */ - u8 type; - - /** Length of payload, in network byte order */ - u16 length; - - /** Payload, if @a type is EAP or EAPOL-Key */ - u8 data[0]; +/** EAPoL header */ +struct eapol_header { + /** Version */ + uint8_t version; + /** Type */ + uint8_t type; + /** Payload length */ + uint16_t len; } __attribute__ (( packed )); +/** 802.1X-2001 */ +#define EAPOL_VERSION_2001 1 -/** An EAPOL frame type handler - * - * Normally there will be at most two of these, one for EAP and one - * for EAPOL-Key frames. The EAPOL interface code handles Start and - * Logoff directly. - */ -struct eapol_handler -{ - /** EAPOL archetype identifier for payload this handler will handle */ - u8 type; +/** EAPoL-encapsulated EAP packets */ +#define EAPOL_TYPE_EAP 0 - /** Receive EAPOL-encapsulated packet of specified type +/** EAPoL key */ +#define EAPOL_TYPE_KEY 5 + +/** An EAPoL handler */ +struct eapol_handler { + /** Type */ + uint8_t type; + /** + * Process received packet * - * @v iob I/O buffer containing packet payload - * @v netdev Network device from which packet was received - * @V ll_dest Destination link-layer address - * @v ll_source Source link-layer address - * @ret rc Return status code + * @v iobuf I/O buffer + * @v netdev Network device + * @v ll_source Link-layer source address + * @ret rc Return status code * - * The I/O buffer will have the EAPOL header pulled off it, so - * @c iob->data points to the first byte of the payload. - * - * This function takes ownership of the I/O buffer passed to it. + * This method takes ownership of the I/O buffer. */ - int ( * rx ) ( struct io_buffer *iob, struct net_device *netdev, - const void *ll_dest, const void *ll_source ); + int ( * rx ) ( struct io_buffer *iobuf, struct net_device *netdev, + const void *ll_source ); }; -#define EAPOL_HANDLERS __table ( struct eapol_handler, "eapol_handlers" ) -#define __eapol_handler __table_entry ( EAPOL_HANDLERS, 01 ) +/** EAPoL handler table */ +#define EAPOL_HANDLERS __table ( struct eapol_handler, "eapol_handlers" ) +/** Declare an EAPoL handler */ +#define __eapol_handler __table_entry ( EAPOL_HANDLERS, 01 ) extern struct net_protocol eapol_protocol __net_protocol; - #endif /* _IPXE_EAPOL_H */ diff --git a/src/include/ipxe/efi/efi.h b/src/include/ipxe/efi/efi.h index 669e5364a..a83fa0f27 100644 --- a/src/include/ipxe/efi/efi.h +++ b/src/include/ipxe/efi/efi.h @@ -29,11 +29,6 @@ FILE_LICENCE ( GPL2_OR_LATER ); /* EFI headers redefine ARRAY_SIZE */ #undef ARRAY_SIZE -/* EFI headers expect ICC to define __GNUC__ */ -#if defined ( __ICC ) && ! 
defined ( __GNUC__ ) -#define __GNUC__ 1 -#endif - /* EFI headers think your compiler uses the MS ABI by default on X64 */ #if __x86_64__ #define EFIAPI __attribute__((ms_abi)) @@ -65,6 +60,16 @@ typedef struct {} *EFI_HANDLE; #include #include +#include +#include + +/** An EFI saved task priority level */ +struct efi_saved_tpl { + /** Current external TPL */ + EFI_TPL current; + /** Previous external TPL */ + EFI_TPL previous; +}; /** An EFI protocol used by iPXE */ struct efi_protocol { @@ -218,9 +223,11 @@ extern EFI_HANDLE efi_image_handle; extern EFI_LOADED_IMAGE_PROTOCOL *efi_loaded_image; extern EFI_DEVICE_PATH_PROTOCOL *efi_loaded_image_path; extern EFI_SYSTEM_TABLE *efi_systab; +extern EFI_TPL efi_external_tpl; extern int efi_shutdown_in_progress; -extern const __attribute__ (( pure )) char * efi_guid_ntoa ( EFI_GUID *guid ); +extern const __attribute__ (( pure )) char * +efi_guid_ntoa ( CONST EFI_GUID *guid ); extern const __attribute__ (( pure )) char * efi_locate_search_type_name ( EFI_LOCATE_SEARCH_TYPE search_type ); extern const __attribute__ (( pure )) char * @@ -230,9 +237,19 @@ efi_devpath_text ( EFI_DEVICE_PATH_PROTOCOL *path ); extern const __attribute__ (( pure )) char * efi_handle_name ( EFI_HANDLE handle ); +extern void dbg_efi_opener ( EFI_HANDLE handle, EFI_GUID *protocol, + EFI_OPEN_PROTOCOL_INFORMATION_ENTRY *opener ); extern void dbg_efi_openers ( EFI_HANDLE handle, EFI_GUID *protocol ); extern void dbg_efi_protocols ( EFI_HANDLE handle ); +#define DBG_EFI_OPENER_IF( level, handle, protocol, \ + opener ) do { \ + if ( DBG_ ## level ) { \ + dbg_efi_opener ( handle, protocol, \ + opener ); \ + } \ + } while ( 0 ) + #define DBG_EFI_OPENERS_IF( level, handle, protocol ) do { \ if ( DBG_ ## level ) { \ dbg_efi_openers ( handle, protocol ); \ @@ -245,6 +262,12 @@ extern void dbg_efi_protocols ( EFI_HANDLE handle ); } \ } while ( 0 ) +#define DBGC_EFI_OPENER_IF( level, id, ... ) do { \ + DBG_AC_IF ( level, id ); \ + DBG_EFI_OPENER_IF ( level, __VA_ARGS__ ); \ + DBG_DC_IF ( level ); \ + } while ( 0 ) + #define DBGC_EFI_OPENERS_IF( level, id, ... ) do { \ DBG_AC_IF ( level, id ); \ DBG_EFI_OPENERS_IF ( level, __VA_ARGS__ ); \ @@ -257,22 +280,67 @@ extern void dbg_efi_protocols ( EFI_HANDLE handle ); DBG_DC_IF ( level ); \ } while ( 0 ) +#define DBGC_EFI_OPENER( ... ) \ + DBGC_EFI_OPENER_IF ( LOG, ##__VA_ARGS__ ) #define DBGC_EFI_OPENERS( ... ) \ DBGC_EFI_OPENERS_IF ( LOG, ##__VA_ARGS__ ) #define DBGC_EFI_PROTOCOLS( ... ) \ DBGC_EFI_PROTOCOLS_IF ( LOG, ##__VA_ARGS__ ) +#define DBGC2_EFI_OPENER( ... ) \ + DBGC_EFI_OPENER_IF ( EXTRA, ##__VA_ARGS__ ) #define DBGC2_EFI_OPENERS( ... ) \ DBGC_EFI_OPENERS_IF ( EXTRA, ##__VA_ARGS__ ) #define DBGC2_EFI_PROTOCOLS( ... ) \ DBGC_EFI_PROTOCOLS_IF ( EXTRA, ##__VA_ARGS__ ) +#define DBGCP_EFI_OPENER( ... ) \ + DBGC_EFI_OPENER_IF ( PROFILE, ##__VA_ARGS__ ) #define DBGCP_EFI_OPENERS( ... ) \ DBGC_EFI_OPENERS_IF ( PROFILE, ##__VA_ARGS__ ) #define DBGCP_EFI_PROTOCOLS( ... 
) \ DBGC_EFI_PROTOCOLS_IF ( PROFILE, ##__VA_ARGS__ ) +/* Allow for EFI-only interface operations */ +#ifdef PLATFORM_efi +#define EFI_INTF_OP INTF_OP +#else +#define EFI_INTF_OP UNUSED_INTF_OP +#endif + +extern unsigned long __stack_chk_guard; +extern unsigned long efi_stack_cookie ( EFI_HANDLE handle ); +extern void __stack_chk_fail ( void ); + +/** + * Initialise stack cookie + * + * @v handle Image handle + */ +static inline __attribute__ (( always_inline )) void +efi_init_stack_guard ( EFI_HANDLE handle ) { + + /* The calling function must not itself use stack protection, + * since the change in the stack guard value would trigger a + * false positive. + * + * There is unfortunately no way to annotate a function to + * exclude the use of stack protection. We must therefore + * rely on correctly anticipating the compiler's decision on + * the use of stack protection. + * + * The calculation of the stack cookie value deliberately + * takes the address of a stack variable (to provide an + * additional source of entropy). This operation would + * trigger the application of stack protection to the calling + * function, and so must be externalised. + */ + __stack_chk_guard = efi_stack_cookie ( handle ); +} + extern EFI_STATUS efi_init ( EFI_HANDLE image_handle, EFI_SYSTEM_TABLE *systab ); +extern void efi_raise_tpl ( struct efi_saved_tpl *tpl ); +extern void efi_restore_tpl ( struct efi_saved_tpl *tpl ); #endif /* _IPXE_EFI_H */ diff --git a/src/include/ipxe/efi/efi_acpi.h b/src/include/ipxe/efi/efi_acpi.h index 01456f137..a698863a6 100644 --- a/src/include/ipxe/efi/efi_acpi.h +++ b/src/include/ipxe/efi/efi_acpi.h @@ -15,4 +15,17 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define ACPI_PREFIX_efi __efi_ #endif +/** + * Locate ACPI table + * + * @v signature Requested table signature + * @v index Requested index of table with this signature + * @ret table Table, or UNULL if not found + */ +static inline __attribute__ (( always_inline )) userptr_t +ACPI_INLINE ( efi, acpi_find ) ( uint32_t signature, unsigned int index ) { + + return acpi_find_via_rsdt ( signature, index ); +} + #endif /* _IPXE_EFI_ACPI_H */ diff --git a/src/include/ipxe/efi/efi_autoboot.h b/src/include/ipxe/efi/efi_autoboot.h index 1d5ddc8c3..706885e28 100644 --- a/src/include/ipxe/efi/efi_autoboot.h +++ b/src/include/ipxe/efi/efi_autoboot.h @@ -9,6 +9,8 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); -extern void efi_set_autoboot ( void ); +#include + +extern int efi_set_autoboot_ll_addr ( EFI_HANDLE device ); #endif /* _IPXE_EFI_AUTOBOOT_H */ diff --git a/src/include/ipxe/efi/efi_autoexec.h b/src/include/ipxe/efi/efi_autoexec.h new file mode 100644 index 000000000..1f93b41cd --- /dev/null +++ b/src/include/ipxe/efi/efi_autoexec.h @@ -0,0 +1,16 @@ +#ifndef _IPXE_EFI_AUTOEXEC_H +#define _IPXE_EFI_AUTOEXEC_H + +/** @file + * + * EFI autoexec script + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +extern int efi_autoexec_load ( EFI_HANDLE device ); + +#endif /* _IPXE_EFI_AUTOEXEC_H */ diff --git a/src/include/ipxe/efi/efi_blacklist.h b/src/include/ipxe/efi/efi_blacklist.h deleted file mode 100644 index c5a5a61dc..000000000 --- a/src/include/ipxe/efi/efi_blacklist.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef _IPXE_EFI_BLACKLIST_H -#define _IPXE_EFI_BLACKLIST_H - -/** @file - * - * EFI driver blacklist - */ - -FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); - -extern void efi_unload_blacklist ( void ); - -#endif /* _IPXE_EFI_BLACKLIST_H */ diff --git a/src/include/ipxe/efi/efi_cachedhcp.h b/src/include/ipxe/efi/efi_cachedhcp.h 
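To make the stack-guard discussion above concrete, a hypothetical EFI entry point would seed the guard before any stack-protected code runs and then hand over to efi_init(). The function name my_efi_entry is invented; the real entry code lives elsewhere in the tree, and this sketch assumes the iPXE EFI build environment.

#include <ipxe/efi/efi.h>

/* Hypothetical EFI application entry point */
EFI_STATUS EFIAPI my_efi_entry ( EFI_HANDLE image_handle,
				 EFI_SYSTEM_TABLE *systab ) {

	/* Seed the stack-protector guard first.  This function itself
	 * must not be built with stack protection, since changing the
	 * guard value mid-function would be reported as a smashed
	 * stack.
	 */
	efi_init_stack_guard ( image_handle );

	/* Proceed with normal iPXE EFI initialisation */
	return efi_init ( image_handle, systab );
}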
new file mode 100644 index 000000000..cd60d4095 --- /dev/null +++ b/src/include/ipxe/efi/efi_cachedhcp.h @@ -0,0 +1,16 @@ +#ifndef _IPXE_EFI_CACHEDHCP_H +#define _IPXE_EFI_CACHEDHCP_H + +/** @file + * + * EFI cached DHCP packet + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +extern int efi_cachedhcp_record ( EFI_HANDLE device ); + +#endif /* _IPXE_EFI_CACHEDHCP_H */ diff --git a/src/include/ipxe/efi/efi_null.h b/src/include/ipxe/efi/efi_null.h new file mode 100644 index 000000000..297457081 --- /dev/null +++ b/src/include/ipxe/efi/efi_null.h @@ -0,0 +1,33 @@ +#ifndef _IPXE_EFI_NULL_H +#define _IPXE_EFI_NULL_H + +/** @file + * + * EFI null interfaces + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern void efi_nullify_snp ( EFI_SIMPLE_NETWORK_PROTOCOL *snp ); +extern void efi_nullify_nii ( EFI_NETWORK_INTERFACE_IDENTIFIER_PROTOCOL *nii ); +extern void efi_nullify_name2 ( EFI_COMPONENT_NAME2_PROTOCOL *name2 ); +extern void efi_nullify_load_file ( EFI_LOAD_FILE_PROTOCOL *load_file ); +extern void efi_nullify_hii ( EFI_HII_CONFIG_ACCESS_PROTOCOL *hii ); +extern void efi_nullify_block ( EFI_BLOCK_IO_PROTOCOL *block ); +extern void efi_nullify_pxe ( EFI_PXE_BASE_CODE_PROTOCOL *pxe ); +extern void efi_nullify_apple ( EFI_APPLE_NET_BOOT_PROTOCOL *apple ); +extern void efi_nullify_usbio ( EFI_USB_IO_PROTOCOL *usbio ); + +#endif /* _IPXE_EFI_NULL_H */ diff --git a/src/include/ipxe/efi/efi_path.h b/src/include/ipxe/efi/efi_path.h new file mode 100644 index 000000000..76ded728c --- /dev/null +++ b/src/include/ipxe/efi/efi_path.h @@ -0,0 +1,43 @@ +#ifndef _IPXE_EFI_PATH_H +#define _IPXE_EFI_PATH_H + +/** @file + * + * EFI device paths + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +struct net_device; +struct uri; +struct iscsi_session; +struct aoe_device; +struct fcp_description; +struct ib_srp_device; +struct usb_function; + +extern EFI_DEVICE_PATH_PROTOCOL * +efi_path_end ( EFI_DEVICE_PATH_PROTOCOL *path ); +extern size_t efi_path_len ( EFI_DEVICE_PATH_PROTOCOL *path ); +extern EFI_DEVICE_PATH_PROTOCOL * efi_paths ( EFI_DEVICE_PATH_PROTOCOL *first, + ... 
); +extern EFI_DEVICE_PATH_PROTOCOL * efi_netdev_path ( struct net_device *netdev ); +extern EFI_DEVICE_PATH_PROTOCOL * efi_uri_path ( struct uri *uri ); +extern EFI_DEVICE_PATH_PROTOCOL * +efi_iscsi_path ( struct iscsi_session *iscsi ); +extern EFI_DEVICE_PATH_PROTOCOL * efi_aoe_path ( struct aoe_device *aoedev ); +extern EFI_DEVICE_PATH_PROTOCOL * efi_fcp_path ( struct fcp_description *desc ); +extern EFI_DEVICE_PATH_PROTOCOL * +efi_ib_srp_path ( struct ib_srp_device *ib_srp ); +extern EFI_DEVICE_PATH_PROTOCOL * efi_usb_path ( struct usb_function *func ); + +extern EFI_DEVICE_PATH_PROTOCOL * efi_describe ( struct interface *interface ); +#define efi_describe_TYPE( object_type ) \ + typeof ( EFI_DEVICE_PATH_PROTOCOL * ( object_type ) ) + +#endif /* _IPXE_EFI_PATH_H */ diff --git a/src/include/ipxe/efi/efi_pci.h b/src/include/ipxe/efi/efi_pci.h index 6dd945f05..2ea1a8f0e 100644 --- a/src/include/ipxe/efi/efi_pci.h +++ b/src/include/ipxe/efi/efi_pci.h @@ -17,9 +17,17 @@ static inline EFIAPI uint64_t LShiftU64 ( UINT64 value, UINTN shift ) { return ( value << shift ); } +/** An EFI PCI device */ +struct efi_pci_device { + /** PCI device */ + struct pci_device pci; + /** PCI I/O protocol */ + EFI_PCI_IO_PROTOCOL *io; +}; + extern int efipci_open ( EFI_HANDLE device, UINT32 attributes, - struct pci_device *pci ); + struct efi_pci_device *efipci ); extern void efipci_close ( EFI_HANDLE device ); -extern int efipci_info ( EFI_HANDLE device, struct pci_device *pci ); +extern int efipci_info ( EFI_HANDLE device, struct efi_pci_device *efipci ); #endif /* _IPXE_EFI_PCI_H */ diff --git a/src/include/ipxe/efi/efi_snp.h b/src/include/ipxe/efi/efi_snp.h index 9076f1d56..c278b1d4c 100644 --- a/src/include/ipxe/efi/efi_snp.h +++ b/src/include/ipxe/efi/efi_snp.h @@ -76,7 +76,7 @@ struct efi_snp_device { }; extern int efi_snp_hii_install ( struct efi_snp_device *snpdev ); -extern void efi_snp_hii_uninstall ( struct efi_snp_device *snpdev ); +extern int efi_snp_hii_uninstall ( struct efi_snp_device *snpdev ); extern struct efi_snp_device * find_snpdev ( EFI_HANDLE handle ); extern struct efi_snp_device * last_opened_snpdev ( void ); extern void efi_snp_add_claim ( int delta ); diff --git a/src/include/ipxe/efi/efi_usb.h b/src/include/ipxe/efi/efi_usb.h index 05b4fad00..06baff529 100644 --- a/src/include/ipxe/efi/efi_usb.h +++ b/src/include/ipxe/efi/efi_usb.h @@ -17,14 +17,14 @@ struct efi_usb_device { /** Name */ const char *name; - /** The underlying USB device */ - struct usb_device *usb; - /** The underlying EFI device */ - struct efi_device *efidev; + /** The underlying USB function */ + struct usb_function *func; /** Configuration descriptor */ struct usb_configuration_descriptor *config; /** Supported languages */ - struct usb_descriptor_header *languages; + uint16_t *lang; + /** Length of supported languages */ + size_t lang_len; /** List of interfaces */ struct list_head interfaces; }; diff --git a/src/include/ipxe/efi/efi_utils.h b/src/include/ipxe/efi/efi_utils.h index 67acba17e..270d38dc8 100644 --- a/src/include/ipxe/efi/efi_utils.h +++ b/src/include/ipxe/efi/efi_utils.h @@ -9,13 +9,9 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include -#include struct device; -extern EFI_DEVICE_PATH_PROTOCOL * -efi_devpath_end ( EFI_DEVICE_PATH_PROTOCOL *path ); -extern size_t efi_devpath_len ( EFI_DEVICE_PATH_PROTOCOL *path ); extern int efi_locate_device ( EFI_HANDLE device, EFI_GUID *protocol, EFI_HANDLE *parent ); extern int efi_child_add ( EFI_HANDLE parent, EFI_HANDLE child ); diff --git 
a/src/include/ipxe/efi/efi_veto.h b/src/include/ipxe/efi/efi_veto.h new file mode 100644 index 000000000..c9ecbb05c --- /dev/null +++ b/src/include/ipxe/efi/efi_veto.h @@ -0,0 +1,13 @@ +#ifndef _IPXE_EFI_VETO_H +#define _IPXE_EFI_VETO_H + +/** @file + * + * EFI driver vetoes + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +extern void efi_veto ( void ); + +#endif /* _IPXE_EFI_VETO_H */ diff --git a/src/include/ipxe/efi/efi_wrap.h b/src/include/ipxe/efi/efi_wrap.h index d8ed1a5cc..6c7ccf2e4 100644 --- a/src/include/ipxe/efi/efi_wrap.h +++ b/src/include/ipxe/efi/efi_wrap.h @@ -10,6 +10,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include +extern EFI_SYSTEM_TABLE * efi_wrap_systab ( void ); extern void efi_wrap ( EFI_HANDLE handle ); #endif /* _IPXE_EFI_WRAP_H */ diff --git a/src/include/ipxe/errfile.h b/src/include/ipxe/errfile.h index 242f91f82..cf5757874 100644 --- a/src/include/ipxe/errfile.h +++ b/src/include/ipxe/errfile.h @@ -75,6 +75,8 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define ERRFILE_sanboot ( ERRFILE_CORE | 0x00230000 ) #define ERRFILE_dummy_sanboot ( ERRFILE_CORE | 0x00240000 ) #define ERRFILE_fdt ( ERRFILE_CORE | 0x00250000 ) +#define ERRFILE_dma ( ERRFILE_CORE | 0x00260000 ) +#define ERRFILE_cachedhcp ( ERRFILE_CORE | 0x00270000 ) #define ERRFILE_eisa ( ERRFILE_DRIVER | 0x00000000 ) #define ERRFILE_isa ( ERRFILE_DRIVER | 0x00010000 ) @@ -208,6 +210,10 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define ERRFILE_intelxl ( ERRFILE_DRIVER | 0x00cb0000 ) #define ERRFILE_pcimsix ( ERRFILE_DRIVER | 0x00cc0000 ) #define ERRFILE_intelxlvf ( ERRFILE_DRIVER | 0x00cd0000 ) +#define ERRFILE_usbblk ( ERRFILE_DRIVER | 0x00ce0000 ) +#define ERRFILE_iphone ( ERRFILE_DRIVER | 0x00cf0000 ) +#define ERRFILE_slirp ( ERRFILE_DRIVER | 0x00d00000 ) +#define ERRFILE_rdc ( ERRFILE_DRIVER | 0x00d10000 ) #define ERRFILE_aoe ( ERRFILE_NET | 0x00000000 ) #define ERRFILE_arp ( ERRFILE_NET | 0x00010000 ) @@ -284,6 +290,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define ERRFILE_xsigo ( ERRFILE_NET | 0x00480000 ) #define ERRFILE_ntp ( ERRFILE_NET | 0x00490000 ) #define ERRFILE_httpntlm ( ERRFILE_NET | 0x004a0000 ) +#define ERRFILE_eap ( ERRFILE_NET | 0x004b0000 ) #define ERRFILE_image ( ERRFILE_IMAGE | 0x00000000 ) #define ERRFILE_elf ( ERRFILE_IMAGE | 0x00010000 ) @@ -295,6 +302,9 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define ERRFILE_png ( ERRFILE_IMAGE | 0x00070000 ) #define ERRFILE_der ( ERRFILE_IMAGE | 0x00080000 ) #define ERRFILE_pem ( ERRFILE_IMAGE | 0x00090000 ) +#define ERRFILE_archive ( ERRFILE_IMAGE | 0x000a0000 ) +#define ERRFILE_zlib ( ERRFILE_IMAGE | 0x000b0000 ) +#define ERRFILE_gzip ( ERRFILE_IMAGE | 0x000c0000 ) #define ERRFILE_asn1 ( ERRFILE_OTHER | 0x00000000 ) #define ERRFILE_chap ( ERRFILE_OTHER | 0x00010000 ) @@ -378,7 +388,12 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define ERRFILE_cert_cmd ( ERRFILE_OTHER | 0x004f0000 ) #define ERRFILE_acpi_settings ( ERRFILE_OTHER | 0x00500000 ) #define ERRFILE_ntlm ( ERRFILE_OTHER | 0x00510000 ) -#define ERRFILE_efi_blacklist ( ERRFILE_OTHER | 0x00520000 ) +#define ERRFILE_efi_veto ( ERRFILE_OTHER | 0x00520000 ) +#define ERRFILE_efi_autoboot ( ERRFILE_OTHER | 0x00530000 ) +#define ERRFILE_efi_autoexec ( ERRFILE_OTHER | 0x00540000 ) +#define ERRFILE_efi_cachedhcp ( ERRFILE_OTHER | 0x00550000 ) +#define ERRFILE_linux_sysfs ( ERRFILE_OTHER | 0x00560000 ) +#define ERRFILE_linux_acpi ( ERRFILE_OTHER | 0x00570000 ) /** @} */ diff --git a/src/include/ipxe/fcp.h b/src/include/ipxe/fcp.h index 853ca13f6..d86afab42 100644 --- 
a/src/include/ipxe/fcp.h +++ b/src/include/ipxe/fcp.h @@ -163,4 +163,12 @@ struct fcp_prli_service_parameters { /** Enhanced discovery supported */ #define FCP_PRLI_ENH_DISC 0x0800 +/** An FCP device description */ +struct fcp_description { + /** Fibre Channel WWN */ + struct fc_name wwn; + /** SCSI LUN */ + struct scsi_lun lun; +}; + #endif /* _IPXE_FCP_H */ diff --git a/src/include/ipxe/gzip.h b/src/include/ipxe/gzip.h new file mode 100644 index 000000000..c8cf64147 --- /dev/null +++ b/src/include/ipxe/gzip.h @@ -0,0 +1,71 @@ +#ifndef _IPXE_GZIP_H +#define _IPXE_GZIP_H + +/** @file + * + * gzip compressed images + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include + +/** gzip header */ +struct gzip_header { + /** Magic ID */ + uint16_t magic; + /** Compression method */ + uint8_t method; + /** Flags */ + uint8_t flags; + /** Modification time */ + uint32_t mtime; + /** Extra flags */ + uint8_t extra; + /** Operating system */ + uint8_t os; +} __attribute__ (( packed )); + +/** Magic ID */ +#define GZIP_MAGIC 0x1f8b + +/** Compression method */ +#define GZIP_METHOD_DEFLATE 0x08 + +/** CRC header is present */ +#define GZIP_FL_HCRC 0x02 + +/** Extra header is present */ +#define GZIP_FL_EXTRA 0x04 + +/** File name is present */ +#define GZIP_FL_NAME 0x08 + +/** File comment is present */ +#define GZIP_FL_COMMENT 0x10 + +/** gzip extra header */ +struct gzip_extra_header { + /** Extra header length (excluding this field) */ + uint16_t len; +} __attribute__ (( packed )); + +/** gzip CRC header */ +struct gzip_crc_header { + /** CRC-16 */ + uint16_t crc; +} __attribute__ (( packed )); + +/** gzip footer */ +struct gzip_footer { + /** CRC-32 */ + uint32_t crc; + /** Uncompressed size (modulo 2^32) */ + uint32_t len; +} __attribute__ (( packed )); + +extern struct image_type gzip_image_type __image_type ( PROBE_NORMAL ); + +#endif /* _IPXE_GZIP_H */ diff --git a/src/include/ipxe/http.h b/src/include/ipxe/http.h index 0893c9537..5a9baddcb 100644 --- a/src/include/ipxe/http.h +++ b/src/include/ipxe/http.h @@ -21,6 +21,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include struct http_transaction; +struct http_connection; /****************************************************************************** * @@ -43,13 +44,10 @@ struct http_scheme { unsigned int port; /** Transport-layer filter (if any) * - * @v xfer Data transfer interface - * @v name Host name - * @v next Next interface + * @v conn HTTP connection * @ret rc Return status code */ - int ( * filter ) ( struct interface *xfer, const char *name, - struct interface **next ); + int ( * filter ) ( struct http_connection *conn ); }; /** HTTP scheme table */ diff --git a/src/include/ipxe/ib_srp.h b/src/include/ipxe/ib_srp.h index ad407b0cf..4b6df8d3b 100644 --- a/src/include/ipxe/ib_srp.h +++ b/src/include/ipxe/ib_srp.h @@ -10,6 +10,8 @@ FILE_LICENCE ( BSD2 ); #include +#include +#include #include #include @@ -55,4 +57,37 @@ struct sbft_ib_subtable { uint8_t reserved[6]; } __attribute__ (( packed )); +/** + * An Infiniband SRP sBFT created by iPXE + */ +struct ipxe_ib_sbft { + /** The table header */ + struct sbft_table table; + /** The SCSI subtable */ + struct sbft_scsi_subtable scsi; + /** The SRP subtable */ + struct sbft_srp_subtable srp; + /** The Infiniband subtable */ + struct sbft_ib_subtable ib; +}; + +/** An Infiniband SRP device */ +struct ib_srp_device { + /** Reference count */ + struct refcnt refcnt; + + /** SRP transport interface */ + struct interface srp; + /** CMRC interface */ + struct interface cmrc; + + 
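For illustration, the new gzip definitions above support header validation along the lines sketched below. This is hypothetical (gzip_check_header is an invented helper; the real image handler also walks the optional extra/name/comment fields and checks the footer CRC) and assumes the iPXE build environment.

#include <stddef.h>
#include <string.h>
#include <errno.h>
#include <byteswap.h>
#include <ipxe/gzip.h>

static int gzip_check_header ( const void *data, size_t len ) {
	struct gzip_header header;

	/* A valid image needs at least a fixed header and a footer */
	if ( len < ( sizeof ( header ) + sizeof ( struct gzip_footer ) ) )
		return -EINVAL;
	memcpy ( &header, data, sizeof ( header ) );

	/* Check magic bytes (stored big-endian on the wire) */
	if ( header.magic != htons ( GZIP_MAGIC ) )
		return -EINVAL;

	/* Only DEFLATE compression is defined */
	if ( header.method != GZIP_METHOD_DEFLATE )
		return -EINVAL;

	return 0;
}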
/** Infiniband device */ + struct ib_device *ibdev; + + /** ACPI descriptor */ + struct acpi_descriptor desc; + /** Boot firmware table parameters */ + struct ipxe_ib_sbft sbft; +}; + #endif /* _IPXE_IB_SRP_H */ diff --git a/src/include/ipxe/image.h b/src/include/ipxe/image.h index 2e7eb4cee..0a5a26034 100644 --- a/src/include/ipxe/image.h +++ b/src/include/ipxe/image.h @@ -113,6 +113,14 @@ struct image_type { */ int ( * asn1 ) ( struct image *image, size_t offset, struct asn1_cursor **cursor ); + /** + * Extract archive image + * + * @v image Image + * @v extracted Extracted image + * @ret rc Return status code + */ + int ( * extract ) ( struct image *image, struct image *extracted ); }; /** @@ -175,6 +183,8 @@ extern struct image * alloc_image ( struct uri *uri ); extern int image_set_uri ( struct image *image, struct uri *uri ); extern int image_set_name ( struct image *image, const char *name ); extern int image_set_cmdline ( struct image *image, const char *cmdline ); +extern int image_set_len ( struct image *image, size_t len ); +extern int image_set_data ( struct image *image, userptr_t data, size_t len ); extern int register_image ( struct image *image ); extern void unregister_image ( struct image *image ); struct image * find_image ( const char *name ); @@ -183,9 +193,14 @@ extern int image_replace ( struct image *replacement ); extern int image_select ( struct image *image ); extern struct image * image_find_selected ( void ); extern int image_set_trust ( int require_trusted, int permanent ); +extern struct image * image_memory ( const char *name, userptr_t data, + size_t len ); extern int image_pixbuf ( struct image *image, struct pixel_buffer **pixbuf ); extern int image_asn1 ( struct image *image, size_t offset, struct asn1_cursor **cursor ); +extern int image_extract ( struct image *image, const char *name, + struct image **extracted ); +extern int image_extract_exec ( struct image *image ); /** * Increment reference count on an image diff --git a/src/include/ipxe/infiniband.h b/src/include/ipxe/infiniband.h index 6f4951f17..379bc109e 100644 --- a/src/include/ipxe/infiniband.h +++ b/src/include/ipxe/infiniband.h @@ -416,6 +416,8 @@ struct ib_device { struct ib_device_operations *op; /** Port number */ unsigned int port; + /** Total ports on device */ + unsigned int ports; /** Port open request counter */ unsigned int open_count; @@ -538,7 +540,6 @@ extern int ib_mcast_attach ( struct ib_device *ibdev, struct ib_queue_pair *qp, union ib_gid *gid ); extern void ib_mcast_detach ( struct ib_device *ibdev, struct ib_queue_pair *qp, union ib_gid *gid ); -extern int ib_count_ports ( struct ib_device *ibdev ); extern int ib_set_port_info ( struct ib_device *ibdev, union ib_mad *mad ); extern int ib_set_pkey_table ( struct ib_device *ibdev, union ib_mad *mad ); extern struct ib_device * alloc_ibdev ( size_t priv_size ); diff --git a/src/include/ipxe/interface.h b/src/include/ipxe/interface.h index b65002c80..19f58a4b4 100644 --- a/src/include/ipxe/interface.h +++ b/src/include/ipxe/interface.h @@ -36,6 +36,21 @@ struct interface_operation { ? op_func : op_func ), \ } +/** + * Define an unused object interface operation + * + * @v op_type Operation type + * @v object_type Implementing method's expected object type + * @v op_func Implementing method + * @ret op Object interface operation + */ +#define UNUSED_INTF_OP( op_type, object_type, op_func ) { \ + .type = NULL, \ + .func = ( ( ( ( typeof ( op_func ) * ) NULL ) == \ + ( ( op_type ## _TYPE ( object_type ) * ) NULL ) ) \ + ? 
NULL : NULL ), \ + } + /** An object interface descriptor */ struct interface_descriptor { /** Offset of interface within containing object */ @@ -154,6 +169,8 @@ extern void intfs_shutdown ( int rc, ... ) __attribute__ (( sentinel )); extern void intf_restart ( struct interface *intf, int rc ); extern void intfs_vrestart ( va_list intfs, int rc ); extern void intfs_restart ( int rc, ... ) __attribute__ (( sentinel )); +extern void intf_insert ( struct interface *intf, struct interface *upper, + struct interface *lower ); extern void intf_poke ( struct interface *intf, void ( type ) ( struct interface *intf ) ); diff --git a/src/include/ipxe/iobuf.h b/src/include/ipxe/iobuf.h index b40ade350..3e079c064 100644 --- a/src/include/ipxe/iobuf.h +++ b/src/include/ipxe/iobuf.h @@ -12,6 +12,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include /** * Minimum I/O buffer length @@ -38,6 +39,9 @@ struct io_buffer { */ struct list_head list; + /** DMA mapping */ + struct dma_mapping map; + /** Start of the buffer */ void *head; /** Start of data */ @@ -210,10 +214,75 @@ static inline void iob_populate ( struct io_buffer *iobuf, (iobuf) = NULL; \ __iobuf; } ) +/** + * Map I/O buffer for DMA + * + * @v iobuf I/O buffer + * @v dma DMA device + * @v len Length to map + * @v flags Mapping flags + * @ret rc Return status code + */ +static inline __always_inline int iob_map ( struct io_buffer *iobuf, + struct dma_device *dma, + size_t len, int flags ) { + return dma_map ( dma, &iobuf->map, virt_to_phys ( iobuf->data ), + len, flags ); +} + +/** + * Map I/O buffer for transmit DMA + * + * @v iobuf I/O buffer + * @v dma DMA device + * @ret rc Return status code + */ +static inline __always_inline int iob_map_tx ( struct io_buffer *iobuf, + struct dma_device *dma ) { + return iob_map ( iobuf, dma, iob_len ( iobuf ), DMA_TX ); +} + +/** + * Map empty I/O buffer for receive DMA + * + * @v iobuf I/O buffer + * @v dma DMA device + * @ret rc Return status code + */ +static inline __always_inline int iob_map_rx ( struct io_buffer *iobuf, + struct dma_device *dma ) { + assert ( iob_len ( iobuf ) == 0 ); + return iob_map ( iobuf, dma, iob_tailroom ( iobuf ), DMA_RX ); +} + +/** + * Get I/O buffer DMA address + * + * @v iobuf I/O buffer + * @ret addr DMA address + */ +static inline __always_inline physaddr_t iob_dma ( struct io_buffer *iobuf ) { + return dma ( &iobuf->map, iobuf->data ); +} + +/** + * Unmap I/O buffer for DMA + * + * @v iobuf I/O buffer + * @v dma DMA device + * @ret rc Return status code + */ +static inline __always_inline void iob_unmap ( struct io_buffer *iobuf ) { + dma_unmap ( &iobuf->map ); +} + extern struct io_buffer * __malloc alloc_iob_raw ( size_t len, size_t align, size_t offset ); extern struct io_buffer * __malloc alloc_iob ( size_t len ); extern void free_iob ( struct io_buffer *iobuf ); +extern struct io_buffer * __malloc alloc_rx_iob ( size_t len, + struct dma_device *dma ); +extern void free_rx_iob ( struct io_buffer *iobuf ); extern void iob_pad ( struct io_buffer *iobuf, size_t min_len ); extern int iob_ensure_headroom ( struct io_buffer *iobuf, size_t len ); extern struct io_buffer * iob_concatenate ( struct list_head *list ); diff --git a/src/include/ipxe/linux/linux_acpi.h b/src/include/ipxe/linux/linux_acpi.h new file mode 100644 index 000000000..a2c33ce2c --- /dev/null +++ b/src/include/ipxe/linux/linux_acpi.h @@ -0,0 +1,18 @@ +#ifndef _IPXE_LINUX_ACPI_H +#define _IPXE_LINUX_ACPI_H + +/** @file + * + * iPXE ACPI API for Linux + * + */ + +FILE_LICENCE ( 
GPL2_OR_LATER_OR_UBDL ); + +#ifdef ACPI_LINUX +#define ACPI_PREFIX_linux +#else +#define ACPI_PREFIX_linux __linux_ +#endif + +#endif /* _IPXE_LINUX_ACPI_H */ diff --git a/src/include/ipxe/linux/linux_pci.h b/src/include/ipxe/linux/linux_pci.h index 22ae7f1bc..de42f766b 100644 --- a/src/include/ipxe/linux/linux_pci.h +++ b/src/include/ipxe/linux/linux_pci.h @@ -22,6 +22,17 @@ extern int linux_pci_read ( struct pci_device *pci, unsigned long where, extern int linux_pci_write ( struct pci_device *pci, unsigned long where, unsigned long value, size_t len ); +/** + * Determine number of PCI buses within system + * + * @ret num_bus Number of buses + */ +static inline __always_inline int +PCIAPI_INLINE ( linux, pci_num_bus ) ( void ) { + /* Assume all buses may exist */ + return 0x100; +} + /** * Read byte from PCI configuration space * @@ -127,4 +138,17 @@ PCIAPI_INLINE ( linux, pci_write_config_dword ) ( struct pci_device *pci, return linux_pci_write ( pci, where, value, sizeof ( value ) ); } +/** + * Map PCI bus address as an I/O address + * + * @v bus_addr PCI bus address + * @v len Length of region + * @ret io_addr I/O address, or NULL on error + */ +static inline __always_inline void * +PCIAPI_INLINE ( linux, pci_ioremap ) ( struct pci_device *pci __unused, + unsigned long bus_addr, size_t len ) { + return ioremap ( bus_addr, len ); +} + #endif /* _IPXE_LINUX_PCI_H */ diff --git a/src/include/ipxe/linux_api.h b/src/include/ipxe/linux_api.h new file mode 100644 index 000000000..5b0b242d1 --- /dev/null +++ b/src/include/ipxe/linux_api.h @@ -0,0 +1,106 @@ +#ifndef _IPXE_LINUX_API_H +#define _IPXE_LINUX_API_H + +/* + * Copyright (C) 2010 Piotr Jaroszyński . + * Copyright (C) 2021 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/** @file + * + * Linux host API + * + * This file is included from both the iPXE build environment and the + * host build environment. + * + */ + +#if __STDC_HOSTED__ +#define __asmcall +#define FILE_LICENCE(x) +#endif + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include + +#if ! __STDC_HOSTED__ +#define __KERNEL_STRICT_NAMES +#include +#include +#include +#include +#include +#include +#define MAP_FAILED ( ( void * ) -1 ) +#endif + +struct sockaddr; +struct slirp_config; +struct slirp_callbacks; +struct Slirp; + +extern int linux_errno; +extern int linux_argc; +extern char **linux_argv; + +extern int __asmcall linux_open ( const char *pathname, int flags, ... ); +extern int __asmcall linux_close ( int fd ); +extern off_t __asmcall linux_lseek ( int fd, off_t offset, int whence ); +extern ssize_t __asmcall linux_read ( int fd, void *buf, size_t count ); +extern ssize_t __asmcall linux_write ( int fd, const void *buf, size_t count ); +extern int __asmcall linux_fcntl ( int fd, int cmd, ... ); +extern int __asmcall linux_ioctl ( int fd, unsigned long request, ... 
); +extern int __asmcall linux_fstat_size ( int fd, size_t *size ); +extern int __asmcall linux_poll ( struct pollfd *fds, unsigned int nfds, + int timeout ); +extern int __asmcall linux_nanosleep ( const struct timespec *req, + struct timespec *rem ); +extern int __asmcall linux_usleep ( unsigned int usec ); +extern int __asmcall linux_gettimeofday ( struct timeval *tv, + struct timezone *tz ); +extern void * __asmcall linux_mmap ( void *addr, size_t length, int prot, + int flags, int fd, off_t offset ); +extern void * __asmcall linux_mremap ( void *old_address, size_t old_size, + size_t new_size, int flags, ... ); +extern int __asmcall linux_munmap ( void *addr, size_t length ); +extern int __asmcall linux_socket ( int domain, int type, int protocol ); +extern int __asmcall linux_bind ( int sockfd, const struct sockaddr *addr, + size_t addrlen ); +extern ssize_t __asmcall linux_sendto ( int sockfd, const void *buf, + size_t len, int flags, + const struct sockaddr *dest_addr, + size_t addrlen ); +extern const char * __asmcall linux_strerror ( int linux_errno ); +extern struct Slirp * __asmcall +linux_slirp_new ( const struct slirp_config *config, + const struct slirp_callbacks *callbacks, void *opaque ); +extern void __asmcall linux_slirp_cleanup ( struct Slirp *slirp ); +extern void __asmcall linux_slirp_input ( struct Slirp *slirp, + const uint8_t *pkt, int pkt_len ); +extern void __asmcall +linux_slirp_pollfds_fill ( struct Slirp *slirp, uint32_t *timeout, + int ( __asmcall * add_poll ) ( int fd, int events, + void *opaque ), + void *opaque ); +extern void __asmcall +linux_slirp_pollfds_poll ( struct Slirp *slirp, int select_error, + int ( __asmcall * get_revents ) ( int idx, + void *opaque ), + void *opaque ); + +#endif /* _IPXE_LINUX_API_H */ diff --git a/src/include/ipxe/linux_sysfs.h b/src/include/ipxe/linux_sysfs.h new file mode 100644 index 000000000..d97b649c0 --- /dev/null +++ b/src/include/ipxe/linux_sysfs.h @@ -0,0 +1,16 @@ +#ifndef _IPXE_LINUX_SYSFS_H +#define _IPXE_LINUX_SYSFS_H + +/** @file + * + * Linux sysfs files + * + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include + +extern int linux_sysfs_read ( const char *filename, userptr_t *data ); + +#endif /* _IPXE_LINUX_SYSFS_H */ diff --git a/src/include/ipxe/malloc.h b/src/include/ipxe/malloc.h index 1878978fd..180ca001d 100644 --- a/src/include/ipxe/malloc.h +++ b/src/include/ipxe/malloc.h @@ -14,7 +14,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); /* * Prototypes for the standard functions (malloc() et al) are in * stdlib.h. Include only if you need the - * non-standard functions, such as malloc_dma(). + * non-standard functions, such as malloc_phys(). * */ #include @@ -32,20 +32,18 @@ extern void mpopulate ( void *start, size_t len ); extern void mdumpfree ( void ); /** - * Allocate memory for DMA + * Allocate memory with specified physical alignment and offset * * @v size Requested size * @v align Physical alignment * @v offset Offset from physical alignment * @ret ptr Memory, or NULL * - * Allocates physically-aligned memory for DMA. - * * @c align must be a power of two. @c size may not be zero. 
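A quick note on the malloc_dma() to malloc_phys() rename above: the allocator still hands back physically aligned memory, it just no longer implies any DMA mapping. A minimal, hypothetical usage sketch follows (example_alloc, example_free and the 4kB alignment are invented; assumes the iPXE build environment). Memory obtained this way must be released with free_phys(), passing the original length.

#include <stddef.h>
#include <errno.h>
#include <ipxe/malloc.h>

static void *example_buf;

static int example_alloc ( size_t len ) {

	/* Physically aligned to 4kB, purely for illustration */
	example_buf = malloc_phys ( len, 4096 );
	if ( ! example_buf )
		return -ENOMEM;
	return 0;
}

static void example_free ( size_t len ) {

	/* Must be freed with free_phys(), using the original length */
	free_phys ( example_buf, len );
	example_buf = NULL;
}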
*/ -static inline void * __malloc malloc_dma_offset ( size_t size, - size_t phys_align, - size_t offset ) { +static inline void * __malloc malloc_phys_offset ( size_t size, + size_t phys_align, + size_t offset ) { void * ptr = alloc_memblock ( size, phys_align, offset ); if ( ptr && size ) VALGRIND_MALLOCLIKE_BLOCK ( ptr, size, 0, 0 ); @@ -53,32 +51,30 @@ static inline void * __malloc malloc_dma_offset ( size_t size, } /** - * Allocate memory for DMA + * Allocate memory with specified physical alignment * * @v size Requested size * @v align Physical alignment * @ret ptr Memory, or NULL * - * Allocates physically-aligned memory for DMA. - * * @c align must be a power of two. @c size may not be zero. */ -static inline void * __malloc malloc_dma ( size_t size, size_t phys_align ) { - return malloc_dma_offset ( size, phys_align, 0 ); +static inline void * __malloc malloc_phys ( size_t size, size_t phys_align ) { + return malloc_phys_offset ( size, phys_align, 0 ); } /** - * Free memory allocated with malloc_dma() + * Free memory allocated with malloc_phys() * - * @v ptr Memory allocated by malloc_dma(), or NULL - * @v size Size of memory, as passed to malloc_dma() + * @v ptr Memory allocated by malloc_phys(), or NULL + * @v size Size of memory, as passed to malloc_phys() * - * Memory allocated with malloc_dma() can only be freed with - * free_dma(); it cannot be freed with the standard free(). + * Memory allocated with malloc_phys() can only be freed with + * free_phys(); it cannot be freed with the standard free(). * * If @c ptr is NULL, no action is taken. */ -static inline void free_dma ( void *ptr, size_t size ) { +static inline void free_phys ( void *ptr, size_t size ) { VALGRIND_FREELIKE_BLOCK ( ptr, 0 ); free_memblock ( ptr, size ); } diff --git a/src/include/ipxe/netdevice.h b/src/include/ipxe/netdevice.h index d498ab697..294f7b367 100644 --- a/src/include/ipxe/netdevice.h +++ b/src/include/ipxe/netdevice.h @@ -246,6 +246,10 @@ struct net_device_operations { * * This method is guaranteed to be called only when the device * is open. + * + * If the network device has an associated DMA device, then + * the I/O buffer will be automatically mapped for transmit + * DMA. 
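The note just below about automatic transmit mapping refers to the I/O buffer DMA helpers added in ipxe/iobuf.h. A hypothetical driver transmit method using them might look like the sketch that follows (my_transmit and the descriptor comments are invented; assumes the iPXE build environment). On the receive side, buffers mapped for receive DMA can be obtained with alloc_rx_iob() and released with free_rx_iob().

#include <stddef.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>

/* Hypothetical driver transmit method */
static int my_transmit ( struct net_device *netdev,
			 struct io_buffer *iobuf ) {
	physaddr_t addr;
	size_t len;

	/* With netdev->dma set, the core has already mapped the buffer
	 * for transmit DMA; the driver needs only the device-side
	 * address and the length to fill its (omitted) descriptor.
	 */
	addr = iob_dma ( iobuf );
	len = iob_len ( iobuf );

	( void ) netdev;
	( void ) addr;
	( void ) len;

	return 0;
}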
*/ int ( * transmit ) ( struct net_device *netdev, struct io_buffer *iobuf ); @@ -358,6 +362,8 @@ struct net_device { char name[NETDEV_NAME_LEN]; /** Underlying hardware device */ struct device *dev; + /** DMA device */ + struct dma_device *dma; /** Network device operations */ struct net_device_operations *op; @@ -445,6 +451,12 @@ struct net_device { */ #define NETDEV_IRQ_UNSUPPORTED 0x0008 +/** Network device transmission is in progress */ +#define NETDEV_TX_IN_PROGRESS 0x0010 + +/** Network device poll is in progress */ +#define NETDEV_POLL_IN_PROGRESS 0x0020 + /** Link-layer protocol table */ #define LL_PROTOCOLS __table ( struct ll_protocol, "ll_protocols" ) diff --git a/src/include/ipxe/null_acpi.h b/src/include/ipxe/null_acpi.h index 1e469e33d..cedb02839 100644 --- a/src/include/ipxe/null_acpi.h +++ b/src/include/ipxe/null_acpi.h @@ -15,8 +15,10 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define ACPI_PREFIX_null __null_ #endif -static inline __always_inline userptr_t -ACPI_INLINE ( null, acpi_find_rsdt ) ( void ) { +static inline __attribute__ (( always_inline )) userptr_t +ACPI_INLINE ( null, acpi_find ) ( uint32_t signature __unused, + unsigned int index __unused ) { + return UNULL; } diff --git a/src/include/ipxe/open.h b/src/include/ipxe/open.h index 43d4cdc66..64e12d177 100644 --- a/src/include/ipxe/open.h +++ b/src/include/ipxe/open.h @@ -70,8 +70,6 @@ struct uri_opener { struct socket_opener { /** Communication semantics (e.g. SOCK_STREAM) */ int semantics; - /** Address family (e.g. AF_INET) */ - int family; /** Open socket * * @v intf Object interface diff --git a/src/include/ipxe/pci.h b/src/include/ipxe/pci.h index 272c4c06f..933f48530 100644 --- a/src/include/ipxe/pci.h +++ b/src/include/ipxe/pci.h @@ -12,6 +12,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include #include /** PCI vendor ID */ @@ -134,6 +135,9 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define PCI_CLASS_SERIAL_USB_EHCI 0x20 /**< ECHI USB controller */ #define PCI_CLASS_SERIAL_USB_XHCI 0x30 /**< xHCI USB controller */ +/** Subordinate bus number */ +#define PCI_SUBORDINATE 0x1a + /** Construct PCI class * * @v base Base class (or PCI_ANY_ID) @@ -187,6 +191,8 @@ struct pci_class_id { struct pci_device { /** Generic device */ struct device dev; + /** DMA device */ + struct dma_device dma; /** Memory base * * This is the physical address of the first valid memory BAR. 
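In passing, the new PCI_SUBORDINATE register definition above is the sort of value consulted when walking buses behind PCI bridges. A minimal, hypothetical sketch (subordinate_bus is an invented helper; assumes the iPXE build environment and the existing pci_read_config_byte() accessor):

#include <stdint.h>
#include <ipxe/pci.h>

/* Return the highest bus number reachable through a bridge */
static unsigned int subordinate_bus ( struct pci_device *bridge ) {
	uint8_t sub;

	pci_read_config_byte ( bridge, PCI_SUBORDINATE, &sub );
	return sub;
}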
diff --git a/src/include/ipxe/pci_io.h b/src/include/ipxe/pci_io.h index 10e69763e..2dcdd9b28 100644 --- a/src/include/ipxe/pci_io.h +++ b/src/include/ipxe/pci_io.h @@ -11,6 +11,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include +#include #include /** @@ -122,4 +123,14 @@ int pci_write_config_word ( struct pci_device *pci, unsigned int where, int pci_write_config_dword ( struct pci_device *pci, unsigned int where, uint32_t value ); +/** + * Map PCI bus address as an I/O address + * + * @v bus_addr PCI bus address + * @v len Length of region + * @ret io_addr I/O address, or NULL on error + */ +void * pci_ioremap ( struct pci_device *pci, unsigned long bus_addr, + size_t len ); + #endif /* _IPXE_PCI_IO_H */ diff --git a/src/include/ipxe/privkey.h b/src/include/ipxe/privkey.h index 81108b6bf..a65cf6106 100644 --- a/src/include/ipxe/privkey.h +++ b/src/include/ipxe/privkey.h @@ -10,7 +10,60 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include +#include -extern struct asn1_cursor private_key; +/** A private key */ +struct private_key { + /** Reference counter */ + struct refcnt refcnt; + /** ASN.1 object builder */ + struct asn1_builder builder; +}; + +/** + * Get reference to private key + * + * @v key Private key + * @ret key Private key + */ +static inline __attribute__ (( always_inline )) struct private_key * +privkey_get ( struct private_key *key ) { + ref_get ( &key->refcnt ); + return key; +} + +/** + * Drop reference to private key + * + * @v key Private key + */ +static inline __attribute__ (( always_inline )) void +privkey_put ( struct private_key *key ) { + ref_put ( &key->refcnt ); +} + +/** + * Get private key ASN.1 cursor + * + * @v key Private key + * @ret cursor ASN.1 cursor + */ +static inline __attribute__ (( always_inline )) struct asn1_cursor * +privkey_cursor ( struct private_key *key ) { + return asn1_built ( &key->builder ); +} + +extern void privkey_free ( struct refcnt *refcnt ); + +/** + * Initialise empty private key + * + */ +static inline __attribute__ (( always_inline )) void +privkey_init ( struct private_key *key ) { + ref_init ( &key->refcnt, privkey_free ); +} + +extern struct private_key private_key; #endif /* _IPXE_PRIVKEY_H */ diff --git a/src/include/ipxe/rndis.h b/src/include/ipxe/rndis.h index bcb6d8e6a..e8ece1e85 100644 --- a/src/include/ipxe/rndis.h +++ b/src/include/ipxe/rndis.h @@ -84,7 +84,7 @@ struct rndis_initialise_completion { /** Packet alignment factor */ uint32_t align; /** Reserved */ - uint32_t reserved; + uint32_t reserved[2]; } __attribute__ (( packed )); /** RNDIS halt message */ @@ -237,7 +237,7 @@ struct rndis_packet_message { /** Per-packet information record */ struct rndis_packet_field ppi; /** Reserved */ - uint32_t reserved; + uint32_t reserved[2]; } __attribute__ (( packed )); /** RNDIS packet record */ diff --git a/src/include/ipxe/rotate.h b/src/include/ipxe/rotate.h index b5693e3ca..4dea09aeb 100644 --- a/src/include/ipxe/rotate.h +++ b/src/include/ipxe/rotate.h @@ -10,44 +10,62 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include +#define ROLx( data, rotation ) \ + ( ( (data) << (rotation) ) | \ + ( (data) >> ( ( 8 * sizeof (data) ) - (rotation) ) ) ); + +#define RORx( data, rotation ) \ + ( ( (data) >> (rotation) ) | \ + ( (data) << ( ( 8 * sizeof (data) ) - (rotation) ) ) ); + static inline __attribute__ (( always_inline )) uint8_t rol8 ( uint8_t data, unsigned int rotation ) { - return ( ( data << rotation ) | ( data >> ( 8 - rotation ) ) ); + return ROLx ( data, rotation ); } static inline __attribute__ (( 
always_inline )) uint8_t ror8 ( uint8_t data, unsigned int rotation ) { - return ( ( data >> rotation ) | ( data << ( 8 - rotation ) ) ); + return RORx ( data, rotation ); } static inline __attribute__ (( always_inline )) uint16_t rol16 ( uint16_t data, unsigned int rotation ) { - return ( ( data << rotation ) | ( data >> ( 16 - rotation ) ) ); + return ROLx ( data, rotation ); } static inline __attribute__ (( always_inline )) uint16_t ror16 ( uint16_t data, unsigned int rotation ) { - return ( ( data >> rotation ) | ( data << ( 16 - rotation ) ) ); + return RORx ( data, rotation ); } static inline __attribute__ (( always_inline )) uint32_t rol32 ( uint32_t data, unsigned int rotation ) { - return ( ( data << rotation ) | ( data >> ( 32 - rotation ) ) ); + return ROLx ( data, rotation ); } static inline __attribute__ (( always_inline )) uint32_t ror32 ( uint32_t data, unsigned int rotation ) { - return ( ( data >> rotation ) | ( data << ( 32 - rotation ) ) ); + return RORx ( data, rotation ); } static inline __attribute__ (( always_inline )) uint64_t rol64 ( uint64_t data, unsigned int rotation ) { - return ( ( data << rotation ) | ( data >> ( 64 - rotation ) ) ); + return ROLx ( data, rotation ); } static inline __attribute__ (( always_inline )) uint64_t ror64 ( uint64_t data, unsigned int rotation ) { - return ( ( data >> rotation ) | ( data << ( 64 - rotation ) ) ); + return RORx ( data, rotation ); +} + +static inline __attribute__ (( always_inline )) unsigned long +roll ( unsigned long data, unsigned int rotation ) { + return ROLx ( data, rotation ); +} + +static inline __attribute__ (( always_inline )) unsigned long +rorl ( unsigned long data, unsigned int rotation ) { + return RORx ( data, rotation ); } #endif /* _IPXE_ROTATE_H */ diff --git a/src/include/ipxe/slirp.h b/src/include/ipxe/slirp.h new file mode 100644 index 000000000..4fb13b934 --- /dev/null +++ b/src/include/ipxe/slirp.h @@ -0,0 +1,155 @@ +#ifndef _IPXE_SLIRP_H +#define _IPXE_SLIRP_H + +/** @file + * + * Linux Slirp network driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include + +/** Ready to be read */ +#define SLIRP_EVENT_IN 0x01 + +/** Ready to be written */ +#define SLIRP_EVENT_OUT 0x02 + +/** Exceptional condition */ +#define SLIRP_EVENT_PRI 0x04 + +/** Error condition */ +#define SLIRP_EVENT_ERR 0x08 + +/** Hang up */ +#define SLIRP_EVENT_HUP 0x10 + +/** Slirp device configuration */ +struct slirp_config { + /** Configuration version */ + uint32_t version; + /** Restrict to host loopback connections only */ + int restricted; + /** IPv4 is enabled */ + bool in_enabled; + /** IPv4 network */ + struct in_addr vnetwork; + /** IPv4 netmask */ + struct in_addr vnetmask; + /** IPv4 host server address */ + struct in_addr vhost; + /** IPv6 is enabled */ + bool in6_enabled; + /** IPv6 prefix */ + struct in6_addr vprefix_addr6; + /** IPv6 prefix length */ + uint8_t vprefix_len; + /** IPv6 host server address */ + struct in6_addr vhost6; + /** Client hostname */ + const char *vhostname; + /** TFTP server name */ + const char *tftp_server_name; + /** TFTP path prefix */ + const char *tftp_path; + /** Boot filename */ + const char *bootfile; + /** DHCPv4 start address */ + struct in_addr vdhcp_start; + /** DNS IPv4 address */ + struct in_addr vnameserver; + /** DNS IPv6 address */ + struct in_addr vnameserver6; + /** DNS search list */ + const char **vdnssearch; + /** Domain name */ + const char *vdomainname; + /** Interface MTU */ + size_t if_mtu; + /** Interface MRU */ + size_t if_mru; + /** Disable host 
loopback connections */ + bool disable_host_loopback; + /** Enable emulation (apparently unsafe) */ + bool enable_emu; +}; + +/** Slirp device callbacks */ +struct slirp_callbacks { + /** + * Send packet + * + * @v buf Data buffer + * @v len Length of data + * @v device Device opaque pointer + * @ret len Consumed length (or negative on error) + */ + ssize_t ( __asmcall * send_packet ) ( const void *buf, size_t len, + void *device ); + /** + * Print an error message + * + * @v msg Error message + * @v device Device opaque pointer + */ + void ( __asmcall * guest_error ) ( const char *msg, void *device ); + /** + * Get virtual clock + * + * @v device Device opaque pointer + * @ret clock_ns Clock time in nanoseconds + */ + int64_t ( __asmcall * clock_get_ns ) ( void *device ); + /** + * Create a new timer + * + * @v callback Timer callback + * @v opaque Timer opaque pointer + * @v device Device opaque pointer + * @ret timer Timer + */ + void * ( __asmcall * timer_new ) ( void ( __asmcall * callback ) + ( void *opaque ), + void *opaque, void *device ); + /** + * Delete a timer + * + * @v timer Timer + * @v device Device opaque pointer + */ + void ( __asmcall * timer_free ) ( void *timer, void *device ); + /** + * Set timer expiry time + * + * @v timer Timer + * @v expire Expiry time + * @v device Device opaque pointer + */ + void ( __asmcall * timer_mod ) ( void *timer, int64_t expire, + void *device ); + /** + * Register file descriptor for polling + * + * @v fd File descriptor + * @v device Device opaque pointer + */ + void ( __asmcall * register_poll_fd ) ( int fd, void *device ); + /** + * Unregister file descriptor + * + * @v fd File descriptor + * @v device Device opaque pointer + */ + void ( __asmcall * unregister_poll_fd ) ( int fd, void *device ); + /** + * Notify that new events are ready + * + * @v device Device opaque pointer + */ + void ( __asmcall * notify ) ( void *device ); +}; + +#endif /* _IPXE_SLIRP_H */ diff --git a/src/include/ipxe/smbios.h b/src/include/ipxe/smbios.h index c1d8fea3e..42278fb24 100644 --- a/src/include/ipxe/smbios.h +++ b/src/include/ipxe/smbios.h @@ -31,15 +31,20 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); /* Include all architecture-dependent SMBIOS API headers */ #include -/** Signature for SMBIOS entry point */ +/** Signature for 32-bit SMBIOS entry point */ #define SMBIOS_SIGNATURE \ ( ( '_' << 0 ) + ( 'S' << 8 ) + ( 'M' << 16 ) + ( '_' << 24 ) ) +/** Signature for 64-bit SMBIOS entry point */ +#define SMBIOS3_SIGNATURE \ + ( ( '_' << 0 ) + ( 'S' << 8 ) + ( 'M' << 16 ) + ( '3' << 24 ) ) + /** - * SMBIOS entry point + * SMBIOS 32-bit entry point * - * This is the single table which describes the list of SMBIOS - * structures. It is located by scanning through the BIOS segment. + * This is the 32-bit version of the table which describes the list of + * SMBIOS structures. It may be located by scanning through the BIOS + * segment or via an EFI configuration table. */ struct smbios_entry { /** Signature @@ -75,6 +80,41 @@ struct smbios_entry { uint8_t bcd_revision; } __attribute__ (( packed )); +/** + * SMBIOS 64-bit entry point + * + * This is the 64-bit version of the table which describes the list of + * SMBIOS structures. It may be located by scanning through the BIOS + * segment or via an EFI configuration table. 
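For orientation, the Slirp callback table above is filled in by the Linux userspace driver and passed to linux_slirp_new() from ipxe/linux_api.h. The fragment below is a hypothetical sketch of that wiring (my_send_packet, my_callbacks and my_device are invented, only one callback is shown, and the remaining members would also have to be provided); it assumes the iPXE Linux build environment.

#include <stddef.h>
#include <ipxe/slirp.h>
#include <ipxe/linux_api.h>

/* Hypothetical outbound packet handler */
static ssize_t __asmcall my_send_packet ( const void *buf, size_t len,
					  void *device ) {

	/* Hand the packet to the network stack (omitted) */
	( void ) buf;
	( void ) device;
	return len;
}

/* Partial callback table; the clock, timer and poll callbacks must
 * also be supplied in a real driver.
 */
static const struct slirp_callbacks my_callbacks = {
	.send_packet = my_send_packet,
};

/* Instantiation would then look roughly like:
 *
 *   struct Slirp *slirp =
 *	linux_slirp_new ( &config, &my_callbacks, my_device );
 */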
+ */ +struct smbios3_entry { + /** Signature + * + * Must be equal to SMBIOS3_SIGNATURE + */ + uint32_t signature; + /** Signature extra byte */ + uint8_t extra; + /** Checksum */ + uint8_t checksum; + /** Length */ + uint8_t len; + /** Major version */ + uint8_t major; + /** Minor version */ + uint8_t minor; + /** Documentation revision */ + uint8_t docrev; + /** Entry point revision */ + uint8_t revision; + /** Reserved */ + uint8_t reserved; + /** Structure table length */ + uint32_t smbios_len; + /** Structure table address */ + uint64_t smbios_address; +} __attribute__ (( packed )); + /** An SMBIOS structure header */ struct smbios_header { /** Type */ @@ -155,6 +195,9 @@ struct smbios_enclosure_information { /** SMBIOS OEM strings structure type */ #define SMBIOS_TYPE_OEM_STRINGS 11 +/** SMBIOS end of table type */ +#define SMBIOS_TYPE_END 127 + /** * SMBIOS entry point descriptor * @@ -192,5 +235,6 @@ extern int read_smbios_string ( struct smbios_structure *structure, unsigned int index, void *data, size_t len ); extern int smbios_version ( void ); +extern void smbios_clear ( void ); #endif /* _IPXE_SMBIOS_H */ diff --git a/src/include/ipxe/tables.h b/src/include/ipxe/tables.h index 60f8efdea..28a87da96 100644 --- a/src/include/ipxe/tables.h +++ b/src/include/ipxe/tables.h @@ -444,75 +444,4 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); pointer >= table_start ( table ) ; \ pointer-- ) -/****************************************************************************** - * - * Intel's C compiler chokes on several of the constructs used in this - * file. The workarounds are ugly, so we use them only for an icc - * build. - * - */ -#define ICC_ALIGN_HACK_FACTOR 128 -#ifdef __ICC - -/* - * icc miscompiles zero-length arrays by inserting padding to a length - * of two array elements. We therefore have to generate the - * __table_entries() symbols by hand in asm. - * - */ -#undef __table_entries -#define __table_entries( table, idx ) ( { \ - extern __table_type ( table ) \ - __table_temp_sym ( idx, __LINE__ ) [] \ - __table_entry ( table, idx ) \ - asm ( __table_entries_sym ( table, idx ) ); \ - __asm__ ( ".ifndef %c0\n\t" \ - ".section " __table_section ( table, idx ) "\n\t" \ - ".align %c1\n\t" \ - "\n%c0:\n\t" \ - ".previous\n\t" \ - ".endif\n\t" \ - : : "i" ( __table_temp_sym ( idx, __LINE__ ) ), \ - "i" ( __table_alignment ( table ) ) ); \ - __table_temp_sym ( idx, __LINE__ ); } ) -#define __table_entries_sym( table, idx ) \ - "__tbl_" __table_name ( table ) "_" #idx -#define __table_temp_sym( a, b ) \ - ___table_temp_sym( __table_, a, _, b ) -#define ___table_temp_sym( a, b, c, d ) a ## b ## c ## d - -/* - * icc ignores __attribute__ (( aligned (x) )) when it is used to - * decrease the compiler's default choice of alignment (which may be - * higher than the alignment actually required by the structure). We - * work around this by forcing the alignment to a large multiple of - * the required value (so that we are never attempting to decrease the - * default alignment) and then postprocessing the object file to - * reduce the alignment back down to the "real" value. - * - */ -#undef __table_alignment -#define __table_alignment( table ) \ - ( ICC_ALIGN_HACK_FACTOR * __alignof__ ( __table_type ( table ) ) ) - -/* - * Because of the alignment hack, we must ensure that the compiler - * never tries to place multiple objects within the same section, - * otherwise the assembler will insert padding to the (incorrect) - * alignment boundary. 
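As an illustrative aside: the struct smbios3_entry introduced in smbios.h above is, per the SMBIOS specification, located either via an EFI configuration table or by scanning the BIOS segment on 16-byte boundaries for the "_SM3_" signature and verifying that the structure's bytes sum to zero. The sketch below is not part of this patch; it assumes a flat pointer to the scanned region and a little-endian host (consistent with the SMBIOS3_SIGNATURE macro), and find_smbios3() is a hypothetical helper rather than iPXE's actual locator.

static struct smbios3_entry * find_smbios3 ( void *start, size_t len ) {
	uint8_t *bytes = start;
	struct smbios3_entry *entry;
	size_t offset;
	unsigned int i;
	uint8_t sum;

	/* 64-bit entry points lie on 16-byte ("paragraph") boundaries */
	for ( offset = 0 ; ( offset + sizeof ( *entry ) ) <= len ;
	      offset += 16 ) {
		entry = ( ( struct smbios3_entry * ) ( bytes + offset ) );
		if ( entry->signature != SMBIOS3_SIGNATURE )
			continue;
		/* Checksum covers entry->len bytes and must sum to zero */
		for ( i = 0, sum = 0 ; i < entry->len ; i++ )
			sum += bytes[ offset + i ];
		if ( sum == 0 )
			return entry;
	}
	return NULL;
}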
Do this by appending the line number to table - * section names. - * - * Note that we don't need to worry about padding between array - * elements, since the alignment is declared on the variable (i.e. the - * whole array) rather than on the type (i.e. on all individual array - * elements). - */ -#undef __table_section -#define __table_section( table, idx ) \ - ".tbl." __table_name ( table ) "." __table_str ( idx ) \ - "." __table_xstr ( __LINE__ ) -#define __table_xstr( x ) __table_str ( x ) - -#endif /* __ICC */ - #endif /* _IPXE_TABLES_H */ diff --git a/src/include/ipxe/tls.h b/src/include/ipxe/tls.h index febbdc589..8b03579cc 100644 --- a/src/include/ipxe/tls.h +++ b/src/include/ipxe/tls.h @@ -18,6 +18,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include #include #include #include @@ -255,6 +256,11 @@ struct tls_session { /** Server name */ const char *name; + /** Root of trust */ + struct x509_root *root; + /** Private key */ + struct private_key *key; + /** Session ID */ uint8_t id[32]; /** Length of session ID */ @@ -319,13 +325,17 @@ struct tls_connection { struct digest_algorithm *handshake_digest; /** Digest algorithm context used for handshake verification */ uint8_t *handshake_ctx; - /** Client certificate (if used) */ - struct x509_certificate *cert; + /** Private key */ + struct private_key *key; + /** Client certificate chain (if used) */ + struct x509_chain *certs; /** Secure renegotiation flag */ int secure_renegotiation; /** Verification data */ struct tls_verify_data verify; + /** Root of trust */ + struct x509_root *root; /** Server certificate chain */ struct x509_chain *chain; /** Certificate validator */ @@ -379,6 +389,6 @@ struct tls_connection { #define TLS_RX_ALIGN 16 extern int add_tls ( struct interface *xfer, const char *name, - struct interface **next ); + struct x509_root *root, struct private_key *key ); #endif /* _IPXE_TLS_H */ diff --git a/src/include/ipxe/usb.h b/src/include/ipxe/usb.h index 68289d26d..911247ede 100644 --- a/src/include/ipxe/usb.h +++ b/src/include/ipxe/usb.h @@ -580,6 +580,7 @@ usb_endpoint_described ( struct usb_endpoint *ep, struct usb_interface_descriptor *interface, unsigned int type, unsigned int index ); extern int usb_endpoint_open ( struct usb_endpoint *ep ); +extern int usb_endpoint_clear_halt ( struct usb_endpoint *ep ); extern void usb_endpoint_close ( struct usb_endpoint *ep ); extern int usb_message ( struct usb_endpoint *ep, unsigned int request, unsigned int value, unsigned int index, @@ -620,6 +621,7 @@ usb_recycle ( struct usb_endpoint *ep, struct io_buffer *iobuf ) { } extern int usb_prefill ( struct usb_endpoint *ep ); +extern int usb_refill_limit ( struct usb_endpoint *ep, unsigned int max ); extern int usb_refill ( struct usb_endpoint *ep ); extern void usb_flush ( struct usb_endpoint *ep ); @@ -1237,6 +1239,23 @@ usb_set_interface ( struct usb_device *usb, unsigned int interface, NULL, 0 ); } +/** + * Get USB depth + * + * @v usb USB device + * @ret depth Hub depth + */ +static inline unsigned int usb_depth ( struct usb_device *usb ) { + struct usb_device *parent; + unsigned int depth; + + /* Navigate up to root hub, constructing depth as we go */ + for ( depth = 0 ; ( parent = usb->port->hub->usb ) ; usb = parent ) + depth++; + + return depth; +} + extern struct list_head usb_buses; extern struct usb_interface_descriptor * @@ -1272,7 +1291,6 @@ extern struct usb_bus * find_usb_bus_by_location ( unsigned int bus_type, extern int usb_alloc_address ( struct usb_bus *bus ); extern void 
usb_free_address ( struct usb_bus *bus, unsigned int address ); extern unsigned int usb_route_string ( struct usb_device *usb ); -extern unsigned int usb_depth ( struct usb_device *usb ); extern struct usb_port * usb_root_hub_port ( struct usb_device *usb ); extern struct usb_port * usb_transaction_translator ( struct usb_device *usb ); @@ -1396,6 +1414,9 @@ struct usb_driver { /** Declare a USB driver */ #define __usb_driver __table_entry ( USB_DRIVERS, 01 ) +/** Declare a USB fallback driver */ +#define __usb_fallback_driver __table_entry ( USB_DRIVERS, 02 ) + /** USB driver scores */ enum usb_driver_score { /** Fallback driver (has no effect on overall score) */ diff --git a/src/include/ipxe/validator.h b/src/include/ipxe/validator.h index 0aee56eb0..367e4045d 100644 --- a/src/include/ipxe/validator.h +++ b/src/include/ipxe/validator.h @@ -12,6 +12,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include -extern int create_validator ( struct interface *job, struct x509_chain *chain ); +extern int create_validator ( struct interface *job, struct x509_chain *chain, + struct x509_root *root ); #endif /* _IPXE_VALIDATOR_H */ diff --git a/src/include/ipxe/x509.h b/src/include/ipxe/x509.h index 78eeafbfb..c703c8f10 100644 --- a/src/include/ipxe/x509.h +++ b/src/include/ipxe/x509.h @@ -191,6 +191,8 @@ struct x509_certificate { /** Flags */ unsigned int flags; + /** Root against which certificate has been validated (if any) */ + struct x509_root *root; /** Maximum number of subsequent certificates in chain */ unsigned int path_remaining; @@ -218,12 +220,10 @@ struct x509_certificate { /** X.509 certificate flags */ enum x509_flags { - /** Certificate has been validated */ - X509_FL_VALIDATED = 0x0001, /** Certificate was added at build time */ - X509_FL_PERMANENT = 0x0002, + X509_FL_PERMANENT = 0x0001, /** Certificate was added explicitly at run time */ - X509_FL_EXPLICIT = 0x0004, + X509_FL_EXPLICIT = 0x0002, }; /** @@ -340,8 +340,10 @@ struct x509_access_method { const struct asn1_cursor *raw ); }; -/** An X.509 root certificate store */ +/** An X.509 root certificate list */ struct x509_root { + /** Reference count */ + struct refcnt refcnt; /** Fingerprint digest algorithm */ struct digest_algorithm *digest; /** Number of certificates */ @@ -350,11 +352,35 @@ struct x509_root { const void *fingerprints; }; +/** + * Get reference to X.509 root certificate list + * + * @v root X.509 root certificate list + * @ret root X.509 root certificate list + */ +static inline __attribute__ (( always_inline )) struct x509_root * +x509_root_get ( struct x509_root *root ) { + ref_get ( &root->refcnt ); + return root; +} + +/** + * Drop reference to X.509 root certificate list + * + * @v root X.509 root certificate list + */ +static inline __attribute__ (( always_inline )) void +x509_root_put ( struct x509_root *root ) { + ref_put ( &root->refcnt ); +} + extern const char * x509_name ( struct x509_certificate *cert ); extern int x509_parse ( struct x509_certificate *cert, const struct asn1_cursor *raw ); extern int x509_certificate ( const void *data, size_t len, struct x509_certificate **cert ); +extern int x509_is_valid ( struct x509_certificate *cert, + struct x509_root *root ); extern int x509_validate ( struct x509_certificate *cert, struct x509_certificate *issuer, time_t time, struct x509_root *root ); @@ -383,22 +409,14 @@ extern int x509_check_root ( struct x509_certificate *cert, struct x509_root *root ); extern int x509_check_time ( struct x509_certificate *cert, time_t time ); -/** - * 
Check if X.509 certificate is valid - * - * @v cert X.509 certificate - */ -static inline int x509_is_valid ( struct x509_certificate *cert ) { - return ( cert->flags & X509_FL_VALIDATED ); -} - /** * Invalidate X.509 certificate * * @v cert X.509 certificate */ static inline void x509_invalidate ( struct x509_certificate *cert ) { - cert->flags &= ~X509_FL_VALIDATED; + x509_root_put ( cert->root ); + cert->root = NULL; cert->path_remaining = 0; } diff --git a/src/include/ipxe/xengrant.h b/src/include/ipxe/xengrant.h index 451a3ceee..fcb7a7157 100644 --- a/src/include/ipxe/xengrant.h +++ b/src/include/ipxe/xengrant.h @@ -166,16 +166,17 @@ xengrant_invalidate ( struct xen_hypervisor *xen, grant_ref_t ref ) { * @v ref Grant reference * @v domid Domain ID * @v subflags Additional flags - * @v page Page start + * @v addr Physical address within page * @ret rc Return status code */ static inline __attribute__ (( always_inline )) int xengrant_permit_access ( struct xen_hypervisor *xen, grant_ref_t ref, - domid_t domid, unsigned int subflags, void *page ) { + domid_t domid, unsigned int subflags, + physaddr_t addr ) { struct grant_entry_header *hdr = xengrant_header ( xen, ref ); struct grant_entry_v1 *v1 = xengrant_v1 ( hdr ); union grant_entry_v2 *v2 = xengrant_v2 ( hdr ); - unsigned long frame = ( virt_to_phys ( page ) / PAGE_SIZE ); + unsigned long frame = ( addr / PAGE_SIZE ); /* Fail (for test purposes) if applicable */ if ( ( XENGRANT_FAIL_RATE > 0 ) && diff --git a/src/include/ipxe/zlib.h b/src/include/ipxe/zlib.h new file mode 100644 index 000000000..29016c38e --- /dev/null +++ b/src/include/ipxe/zlib.h @@ -0,0 +1,43 @@ +#ifndef _IPXE_ZLIB_H +#define _IPXE_ZLIB_H + +/** @file + * + * zlib compressed images + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include + +/** zlib magic header */ +union zlib_magic { + /** Compression method and flags */ + uint8_t cmf; + /** Check value */ + uint16_t check; +} __attribute__ (( packed )); + +/** + * Check that zlib magic header is valid + * + * @v magic Magic header + * @ret is_valid Magic header is valid + */ +static inline int zlib_magic_is_valid ( union zlib_magic *magic ) { + + /* Check magic value as per RFC 6713 */ + return ( ( ( magic->cmf & 0x8f ) == 0x08 ) && + ( ( be16_to_cpu ( magic->check ) % 31 ) == 0 ) ); +} + +extern int zlib_deflate ( enum deflate_format format, struct deflate_chunk *in, + struct image *extracted ); + +extern struct image_type zlib_image_type __image_type ( PROBE_NORMAL ); + +#endif /* _IPXE_ZLIB_H */ diff --git a/src/include/linux_api.h b/src/include/linux_api.h deleted file mode 100644 index fe9fa910f..000000000 --- a/src/include/linux_api.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (C) 2010 Piotr Jaroszyński - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
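For reference, the zlib_magic_is_valid() heuristic added in zlib.h above (CMF/FLG check per RFC 6713) can be applied to the start of a candidate buffer before attempting extraction. The probe helper below is a minimal usage sketch, not part of this patch; it assumes the buffer holds at least the two header bytes and borrows iPXE's -ENOEXEC convention for "not an image of this type".

static int zlib_probe_example ( const void *data, size_t len ) {
	union zlib_magic magic;

	/* Need at least the CMF and FLG bytes */
	if ( len < sizeof ( magic ) )
		return -ENOEXEC;

	/* Copy out the header and apply the RFC 6713 check */
	memcpy ( &magic, data, sizeof ( magic ) );
	return ( zlib_magic_is_valid ( &magic ) ? 0 : -ENOEXEC );
}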
- */ - -#ifndef _LINUX_API_H -#define _LINUX_API_H - -/** * @file - * - * Linux API prototypes. - * Most of the functions map directly to linux syscalls and are the equivalent - * of POSIX functions with the linux_ prefix removed. - */ - -FILE_LICENCE(GPL2_OR_LATER); - -#include -#include - -#include - -#define __KERNEL_STRICT_NAMES -#include -#include -typedef __kernel_pid_t pid_t; -typedef __kernel_suseconds_t suseconds_t; -typedef __kernel_loff_t loff_t; -#include -#include -#include -#include -#include -typedef unsigned long nfds_t; -typedef uint32_t useconds_t; -typedef uint32_t socklen_t; -struct sockaddr; -#define MAP_FAILED ( ( void * ) -1 ) -#define SEEK_SET 0 - -extern long linux_syscall ( int number, ... ); - -extern int linux_open ( const char *pathname, int flags ); -extern int linux_close ( int fd ); -extern off_t linux_lseek ( int fd, off_t offset, int whence ); -extern __kernel_ssize_t linux_read ( int fd, void *buf, __kernel_size_t count ); -extern __kernel_ssize_t linux_write ( int fd, const void *buf, - __kernel_size_t count ); -extern int linux_fcntl ( int fd, int cmd, ... ); -extern int linux_ioctl ( int fd, int request, ... ); -extern int linux_poll ( struct pollfd *fds, nfds_t nfds, int timeout ); -extern int linux_nanosleep ( const struct timespec *req, struct timespec *rem ); -extern int linux_usleep ( useconds_t usec ); -extern int linux_gettimeofday ( struct timeval *tv, struct timezone *tz ); -extern void * linux_mmap ( void *addr, __kernel_size_t length, int prot, - int flags, int fd, off_t offset ); -extern void * linux_mremap ( void *old_address, __kernel_size_t old_size, - __kernel_size_t new_size, int flags ); -extern int linux_munmap ( void *addr, __kernel_size_t length ); -extern int linux_socket ( int domain, int type_, int protocol ); -extern int linux_bind ( int fd, const struct sockaddr *addr, - socklen_t addrlen ); -extern ssize_t linux_sendto ( int fd, const void *buf, size_t len, int flags, - const struct sockaddr *daddr, socklen_t addrlen ); - -extern const char * linux_strerror ( int errnum ); - -#endif /* _LINUX_API_H */ diff --git a/src/include/mii.h b/src/include/mii.h index e2afef854..515ba224d 100644 --- a/src/include/mii.h +++ b/src/include/mii.h @@ -23,6 +23,8 @@ FILE_LICENCE ( GPL2_ONLY ); #define MII_EXPANSION 0x06 /* Expansion register */ #define MII_CTRL1000 0x09 /* 1000BASE-T control */ #define MII_STAT1000 0x0a /* 1000BASE-T status */ +#define MII_MMD_CTRL 0x0d /* MMD Access Control Register */ +#define MII_MMD_DATA 0x0e /* MMD Access Data Register */ #define MII_ESTATUS 0x0f /* Extended Status */ #define MII_DCOUNTER 0x12 /* Disconnect counter */ #define MII_FCSCOUNTER 0x13 /* False carrier counter */ diff --git a/src/include/readline/readline.h b/src/include/readline/readline.h index afafbbdf5..3caf28b47 100644 --- a/src/include/readline/readline.h +++ b/src/include/readline/readline.h @@ -51,7 +51,8 @@ struct readline_history { extern void history_free ( struct readline_history *history ); extern int readline_history ( const char *prompt, const char *prefill, - struct readline_history *history, char **line ); + struct readline_history *history, + unsigned long timeout, char **line ); extern char * __malloc readline ( const char *prompt ); #endif /* _READLINE_H */ diff --git a/src/include/stdio.h b/src/include/stdio.h index a618482ce..ac17da83d 100644 --- a/src/include/stdio.h +++ b/src/include/stdio.h @@ -6,7 +6,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include -extern void putchar ( int character ); +extern int 
putchar ( int character ); extern int getchar ( void ); diff --git a/src/include/string.h b/src/include/string.h index 0f4182001..5f5aecb92 100644 --- a/src/include/string.h +++ b/src/include/string.h @@ -15,6 +15,8 @@ extern void * generic_memset ( void *dest, int character, size_t len ) __nonnull; extern void * generic_memcpy ( void *dest, const void *src, size_t len ) __nonnull; +extern void * generic_memcpy_reverse ( void *dest, const void *src, + size_t len ) __nonnull; extern void * generic_memmove ( void *dest, const void *src, size_t len ) __nonnull; diff --git a/src/include/strings.h b/src/include/strings.h index fab26dc28..d7e9d6971 100644 --- a/src/include/strings.h +++ b/src/include/strings.h @@ -189,5 +189,7 @@ bzero ( void *dest, size_t len ) { } int __pure strcasecmp ( const char *first, const char *second ) __nonnull; +int __pure strncasecmp ( const char *first, const char *second, + size_t max ) __nonnull; #endif /* _STRINGS_H */ diff --git a/src/include/usr/ifmgmt.h b/src/include/usr/ifmgmt.h index 5c386327b..8d8a6bb56 100644 --- a/src/include/usr/ifmgmt.h +++ b/src/include/usr/ifmgmt.h @@ -14,9 +14,11 @@ struct net_device_configurator; extern int ifopen ( struct net_device *netdev ); extern int ifconf ( struct net_device *netdev, - struct net_device_configurator *configurator ); + struct net_device_configurator *configurator, + unsigned long timeout ); extern void ifclose ( struct net_device *netdev ); extern void ifstat ( struct net_device *netdev ); -extern int iflinkwait ( struct net_device *netdev, unsigned long timeout ); +extern int iflinkwait ( struct net_device *netdev, unsigned long timeout, + int verbose ); #endif /* _USR_IFMGMT_H */ diff --git a/src/include/usr/imgarchive.h b/src/include/usr/imgarchive.h new file mode 100644 index 000000000..bf0c18f55 --- /dev/null +++ b/src/include/usr/imgarchive.h @@ -0,0 +1,16 @@ +#ifndef _USR_IMGARCHIVE_H +#define _USR_IMGARCHIVE_H + +/** @file + * + * Archive image management + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include + +extern int imgextract ( struct image *image, const char *name ); + +#endif /* _USR_IMGARCHIVE_H */ diff --git a/src/include/usr/imgmgmt.h b/src/include/usr/imgmgmt.h index 806df0bfb..14fb7cbc6 100644 --- a/src/include/usr/imgmgmt.h +++ b/src/include/usr/imgmgmt.h @@ -18,5 +18,6 @@ extern int imgdownload_string ( const char *uri_string, unsigned long timeout, extern int imgacquire ( const char *name, unsigned long timeout, struct image **image ); extern void imgstat ( struct image *image ); +extern int imgmem ( const char *name, userptr_t data, size_t len ); #endif /* _USR_IMGMGMT_H */ diff --git a/src/interface/efi/efi_acpi.c b/src/interface/efi/efi_acpi.c index a347eaf3a..07a225632 100644 --- a/src/interface/efi/efi_acpi.c +++ b/src/interface/efi/efi_acpi.c @@ -54,3 +54,4 @@ static userptr_t efi_find_rsdt ( void ) { } PROVIDE_ACPI ( efi, acpi_find_rsdt, efi_find_rsdt ); +PROVIDE_ACPI_INLINE ( efi, acpi_find ); diff --git a/src/interface/efi/efi_autoboot.c b/src/interface/efi/efi_autoboot.c index a9e807e23..08d67f761 100644 --- a/src/interface/efi/efi_autoboot.c +++ b/src/interface/efi/efi_autoboot.c @@ -23,6 +23,8 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +#include +#include #include #include #include @@ -37,8 +39,10 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); /** * Identify autoboot device * + * @v device Device handle + * @ret rc Return status code */ -void efi_set_autoboot ( void ) { +int efi_set_autoboot_ll_addr ( EFI_HANDLE device ) { EFI_BOOT_SERVICES *bs = 
efi_systab->BootServices; union { EFI_SIMPLE_NETWORK_PROTOCOL *snp; @@ -46,26 +50,30 @@ void efi_set_autoboot ( void ) { } snp; EFI_SIMPLE_NETWORK_MODE *mode; EFI_STATUS efirc; + int rc; /* Look for an SNP instance on the image's device handle */ - if ( ( efirc = bs->OpenProtocol ( efi_loaded_image->DeviceHandle, + if ( ( efirc = bs->OpenProtocol ( device, &efi_simple_network_protocol_guid, &snp.interface, efi_image_handle, NULL, EFI_OPEN_PROTOCOL_GET_PROTOCOL ))!=0){ - DBGC ( efi_loaded_image, "EFI found no autoboot device\n" ); - return; + rc = -EEFI ( efirc ); + DBGC ( device, "EFI %s has no SNP instance: %s\n", + efi_handle_name ( device ), strerror ( rc ) ); + return rc; } /* Record autoboot device */ mode = snp.snp->Mode; set_autoboot_ll_addr ( &mode->CurrentAddress, mode->HwAddressSize ); - DBGC ( efi_loaded_image, "EFI found autoboot link-layer address:\n" ); - DBGC_HDA ( efi_loaded_image, 0, &mode->CurrentAddress, - mode->HwAddressSize ); + DBGC ( device, "EFI %s found autoboot link-layer address:\n", + efi_handle_name ( device ) ); + DBGC_HDA ( device, 0, &mode->CurrentAddress, mode->HwAddressSize ); /* Close protocol */ - bs->CloseProtocol ( efi_loaded_image->DeviceHandle, - &efi_simple_network_protocol_guid, + bs->CloseProtocol ( device, &efi_simple_network_protocol_guid, efi_image_handle, NULL ); + + return 0; } diff --git a/src/interface/efi/efi_autoexec.c b/src/interface/efi/efi_autoexec.c new file mode 100644 index 000000000..88eb379bb --- /dev/null +++ b/src/interface/efi/efi_autoexec.c @@ -0,0 +1,206 @@ +/* + * Copyright (C) 2021 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
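To show how the reworked efi_set_autoboot_ll_addr() in efi_autoboot.c above is intended to be driven, here is a minimal caller sketch (not part of this patch) that passes in the handle of the device the image was loaded from; failure is treated as non-fatal, since the autoboot device is only a hint.

static void efi_autoboot_example ( void ) {
	EFI_HANDLE device = efi_loaded_image->DeviceHandle;
	int rc;

	/* Record the loading device's link-layer address, if it has SNP */
	if ( ( rc = efi_set_autoboot_ll_addr ( device ) ) != 0 ) {
		DBGC ( device, "EFI could not identify autoboot device: %s\n",
		       strerror ( rc ) );
	}
}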
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * EFI autoexec script + * + */ + +/** Autoexec script filename */ +#define AUTOEXEC_FILENAME L"autoexec.ipxe" + +/** Autoexec script image name */ +#define AUTOEXEC_NAME "autoexec.ipxe" + +/** Autoexec script (if any) */ +static void *efi_autoexec; + +/** Autoexec script length */ +static size_t efi_autoexec_len; + +/** + * Load autoexec script + * + * @v device Device handle + * @ret rc Return status code + */ +int efi_autoexec_load ( EFI_HANDLE device ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + static wchar_t name[] = AUTOEXEC_FILENAME; + union { + void *interface; + EFI_SIMPLE_FILE_SYSTEM_PROTOCOL *fs; + } u; + struct { + EFI_FILE_INFO info; + CHAR16 name[ sizeof ( name ) / sizeof ( name[0] ) ]; + } info; + EFI_FILE_PROTOCOL *root; + EFI_FILE_PROTOCOL *file; + UINTN size; + VOID *data; + EFI_STATUS efirc; + int rc; + + /* Sanity check */ + assert ( efi_autoexec == NULL ); + assert ( efi_autoexec_len == 0 ); + + /* Open simple file system protocol */ + if ( ( efirc = bs->OpenProtocol ( device, + &efi_simple_file_system_protocol_guid, + &u.interface, efi_image_handle, + device, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ))!=0){ + rc = -EEFI ( efirc ); + DBGC ( device, "EFI %s has no filesystem instance: %s\n", + efi_handle_name ( device ), strerror ( rc ) ); + goto err_filesystem; + } + + /* Open root directory */ + if ( ( efirc = u.fs->OpenVolume ( u.fs, &root ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( device, "EFI %s could not open volume: %s\n", + efi_handle_name ( device ), strerror ( rc ) ); + goto err_volume; + } + + /* Open autoexec script */ + if ( ( efirc = root->Open ( root, &file, name, + EFI_FILE_MODE_READ, 0 ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( device, "EFI %s has no %ls: %s\n", + efi_handle_name ( device ), name, strerror ( rc ) ); + goto err_open; + } + + /* Get file information */ + size = sizeof ( info ); + if ( ( efirc = file->GetInfo ( file, &efi_file_info_id, &size, + &info ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( device, "EFI %s could not get %ls info: %s\n", + efi_handle_name ( device ), name, strerror ( rc ) ); + goto err_getinfo; + } + size = info.info.FileSize; + + /* Ignore zero-length files */ + if ( ! 
size ) { + rc = -EINVAL; + DBGC ( device, "EFI %s has zero-length %ls\n", + efi_handle_name ( device ), name ); + goto err_empty; + } + + /* Allocate temporary copy */ + if ( ( efirc = bs->AllocatePool ( EfiBootServicesData, size, + &data ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( device, "EFI %s could not allocate %ls: %s\n", + efi_handle_name ( device ), name, strerror ( rc ) ); + goto err_alloc; + } + + /* Read file */ + if ( ( efirc = file->Read ( file, &size, data ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( device, "EFI %s could not read %ls: %s\n", + efi_handle_name ( device ), name, strerror ( rc ) ); + goto err_read; + } + + /* Record autoexec script */ + efi_autoexec = data; + efi_autoexec_len = size; + data = NULL; + DBGC ( device, "EFI %s found %ls\n", + efi_handle_name ( device ), name ); + + /* Success */ + rc = 0; + + err_read: + if ( data ) + bs->FreePool ( data ); + err_alloc: + err_empty: + err_getinfo: + file->Close ( file ); + err_open: + root->Close ( root ); + err_volume: + bs->CloseProtocol ( device, &efi_simple_file_system_protocol_guid, + efi_image_handle, device ); + err_filesystem: + return rc; +} + +/** + * Register autoexec script + * + */ +static void efi_autoexec_startup ( void ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + EFI_HANDLE device = efi_loaded_image->DeviceHandle; + const char *name = AUTOEXEC_NAME; + struct image *image; + + /* Do nothing if we have no autoexec script */ + if ( ! efi_autoexec ) + return; + + /* Create autoexec image */ + image = image_memory ( name, virt_to_user ( efi_autoexec ), + efi_autoexec_len ); + if ( ! image ) { + DBGC ( device, "EFI %s could not create %s\n", + efi_handle_name ( device ), name ); + return; + } + DBGC ( device, "EFI %s registered %s\n", + efi_handle_name ( device ), name ); + + /* Free temporary copy */ + bs->FreePool ( efi_autoexec ); + efi_autoexec = NULL; +} + +/** Autoexec script startup function */ +struct startup_fn efi_autoexec_startup_fn __startup_fn ( STARTUP_NORMAL ) = { + .name = "efi_autoexec", + .startup = efi_autoexec_startup, +}; diff --git a/src/interface/efi/efi_blacklist.c b/src/interface/efi/efi_blacklist.c deleted file mode 100644 index 292b28e8c..000000000 --- a/src/interface/efi/efi_blacklist.c +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Copyright (C) 2019 Michael Brown . - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. 
- */ - -FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/** @file - * - * EFI driver blacklist - * - */ - -/** A blacklisted driver */ -struct efi_blacklist { - /** Name */ - const char *name; - /** - * Check if driver is blacklisted - * - * @v binding Driver binding protocol - * @v loaded Loaded image protocol - * @v wtf Component name protocol, if present - * @ret blacklisted Driver is the blacklisted driver - */ - int ( * blacklist ) ( EFI_DRIVER_BINDING_PROTOCOL *binding, - EFI_LOADED_IMAGE_PROTOCOL *loaded, - EFI_COMPONENT_NAME_PROTOCOL *wtf ); -}; - -/** - * Blacklist Dell Ip4ConfigDxe driver - * - * @v binding Driver binding protocol - * @v loaded Loaded image protocol - * @v wtf Component name protocol, if present - * @ret blacklisted Driver is the blacklisted driver - */ -static int -efi_blacklist_dell_ip4config ( EFI_DRIVER_BINDING_PROTOCOL *binding __unused, - EFI_LOADED_IMAGE_PROTOCOL *loaded __unused, - EFI_COMPONENT_NAME_PROTOCOL *wtf ) { - static const CHAR16 ip4cfg[] = L"IP4 CONFIG Network Service Driver"; - static const char dell[] = "Dell Inc."; - char manufacturer[ sizeof ( dell ) ]; - CHAR16 *name; - - /* Check driver name */ - if ( ! wtf ) - return 0; - if ( wtf->GetDriverName ( wtf, "eng", &name ) != 0 ) - return 0; - if ( memcmp ( name, ip4cfg, sizeof ( ip4cfg ) ) != 0 ) - return 0; - - /* Check manufacturer */ - fetch_string_setting ( NULL, &manufacturer_setting, manufacturer, - sizeof ( manufacturer ) ); - if ( strcmp ( manufacturer, dell ) != 0 ) - return 0; - - return 1; -} - -/** Blacklisted drivers */ -static struct efi_blacklist efi_blacklists[] = { - { - .name = "Dell Ip4Config", - .blacklist = efi_blacklist_dell_ip4config, - }, -}; - -/** - * Find driver blacklisting, if any - * - * @v driver Driver binding handle - * @ret blacklist Driver blacklisting, or NULL - * @ret rc Return status code - */ -static int efi_blacklist ( EFI_HANDLE driver, - struct efi_blacklist **blacklist ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; - union { - EFI_DRIVER_BINDING_PROTOCOL *binding; - void *interface; - } binding; - union { - EFI_LOADED_IMAGE_PROTOCOL *loaded; - void *interface; - } loaded; - union { - EFI_COMPONENT_NAME_PROTOCOL *wtf; - void *interface; - } wtf; - unsigned int i; - EFI_HANDLE image; - EFI_STATUS efirc; - int rc; - - DBGC2 ( &efi_blacklists, "EFIBL checking %s\n", - efi_handle_name ( driver ) ); - - /* Mark as not blacklisted */ - *blacklist = NULL; - - /* Open driver binding protocol */ - if ( ( efirc = bs->OpenProtocol ( - driver, &efi_driver_binding_protocol_guid, - &binding.interface, efi_image_handle, driver, - EFI_OPEN_PROTOCOL_GET_PROTOCOL ) ) != 0 ) { - rc = -EEFI ( efirc ); - DBGC ( driver, "EFIBL %s could not open driver binding " - "protocol: %s\n", efi_handle_name ( driver ), - strerror ( rc ) ); - goto err_binding; - } - image = binding.binding->ImageHandle; - - /* Open loaded image protocol */ - if ( ( efirc = bs->OpenProtocol ( - image, &efi_loaded_image_protocol_guid, - &loaded.interface, efi_image_handle, image, - EFI_OPEN_PROTOCOL_GET_PROTOCOL ) ) != 0 ) { - rc = -EEFI ( efirc ); - DBGC ( driver, "EFIBL %s could not open", - efi_handle_name ( driver ) ); - DBGC ( driver, " %s loaded image protocol: %s\n", - efi_handle_name ( image ), strerror ( rc ) ); - goto err_loaded; - } - - /* Open component name protocol, if present*/ - if ( ( efirc = bs->OpenProtocol ( - driver, &efi_component_name_protocol_guid, - &wtf.interface, efi_image_handle, 
driver, - EFI_OPEN_PROTOCOL_GET_PROTOCOL ) ) != 0 ) { - /* Ignore failure; is not required to be present */ - wtf.interface = NULL; - } - - /* Check blacklistings */ - for ( i = 0 ; i < ( sizeof ( efi_blacklists ) / - sizeof ( efi_blacklists[0] ) ) ; i++ ) { - if ( efi_blacklists[i].blacklist ( binding.binding, - loaded.loaded, wtf.wtf ) ) { - *blacklist = &efi_blacklists[i]; - break; - } - } - - /* Success */ - rc = 0; - - /* Close protocols */ - if ( wtf.wtf ) { - bs->CloseProtocol ( driver, &efi_component_name_protocol_guid, - efi_image_handle, driver ); - } - bs->CloseProtocol ( image, &efi_loaded_image_protocol_guid, - efi_image_handle, image ); - err_loaded: - bs->CloseProtocol ( driver, &efi_driver_binding_protocol_guid, - efi_image_handle, driver ); - err_binding: - return rc; -} - -/** - * Unload any blacklisted drivers - * - */ -void efi_unload_blacklist ( void ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; - struct efi_blacklist *blacklist; - EFI_HANDLE *drivers; - EFI_HANDLE driver; - UINTN num_drivers; - unsigned int i; - EFI_STATUS efirc; - int rc; - - /* Locate all driver binding protocol handles */ - if ( ( efirc = bs->LocateHandleBuffer ( - ByProtocol, &efi_driver_binding_protocol_guid, - NULL, &num_drivers, &drivers ) ) != 0 ) { - rc = -EEFI ( efirc ); - DBGC ( &efi_blacklists, "EFIBL could not list all drivers: " - "%s\n", strerror ( rc ) ); - return; - } - - /* Unload any blacklisted drivers */ - for ( i = 0 ; i < num_drivers ; i++ ) { - driver = drivers[i]; - if ( ( rc = efi_blacklist ( driver, &blacklist ) ) != 0 ) { - DBGC ( driver, "EFIBL could not determine " - "blacklisting for %s: %s\n", - efi_handle_name ( driver ), strerror ( rc ) ); - continue; - } - if ( ! blacklist ) - continue; - DBGC ( driver, "EFIBL unloading %s (%s)\n", - efi_handle_name ( driver ), blacklist->name ); - if ( ( efirc = bs->UnloadImage ( driver ) ) != 0 ) { - DBGC ( driver, "EFIBL could not unload %s: %s\n", - efi_handle_name ( driver ), strerror ( rc ) ); - } - } - - /* Free handle list */ - bs->FreePool ( drivers ); -} diff --git a/src/interface/efi/efi_block.c b/src/interface/efi/efi_block.c index 91f830a11..74cf7c0c0 100644 --- a/src/interface/efi/efi_block.c +++ b/src/interface/efi/efi_block.c @@ -54,7 +54,8 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include +#include +#include #include /** ACPI table protocol protocol */ @@ -64,23 +65,6 @@ EFI_REQUEST_PROTOCOL ( EFI_ACPI_TABLE_PROTOCOL, &acpi ); /** Boot filename */ static wchar_t efi_block_boot_filename[] = EFI_REMOVABLE_MEDIA_FILE_NAME; -/** iPXE EFI block device vendor device path GUID */ -#define IPXE_BLOCK_DEVICE_PATH_GUID \ - { 0x8998b594, 0xf531, 0x4e87, \ - { 0x8b, 0xdf, 0x8f, 0x88, 0x54, 0x3e, 0x99, 0xd4 } } - -/** iPXE EFI block device vendor device path GUID */ -static EFI_GUID ipxe_block_device_path_guid - = IPXE_BLOCK_DEVICE_PATH_GUID; - -/** An iPXE EFI block device vendor device path */ -struct efi_block_vendor_path { - /** Generic vendor device path */ - VENDOR_DEVICE_PATH vendor; - /** Block device URI */ - CHAR16 uri[0]; -} __attribute__ (( packed )); - /** EFI SAN device private data */ struct efi_block_data { /** SAN device */ @@ -237,7 +221,7 @@ static void efi_block_connect ( struct san_device *sandev ) { /* Try to connect all possible drivers to this block device */ if ( ( efirc = bs->ConnectController ( block->handle, NULL, - NULL, 1 ) ) != 0 ) { + NULL, TRUE ) ) != 0 ) { rc = -EEFI ( efirc ); DBGC ( sandev, "EFIBLK %#02x could not connect drivers: %s\n", sandev->drive, 
strerror ( rc ) ); @@ -259,16 +243,9 @@ static void efi_block_connect ( struct san_device *sandev ) { static int efi_block_hook ( unsigned int drive, struct uri **uris, unsigned int count, unsigned int flags ) { EFI_BOOT_SERVICES *bs = efi_systab->BootServices; - EFI_DEVICE_PATH_PROTOCOL *end; - struct efi_block_vendor_path *vendor; - struct efi_snp_device *snpdev; struct san_device *sandev; struct efi_block_data *block; - size_t prefix_len; - size_t uri_len; - size_t vendor_len; - size_t len; - char *uri_buf; + int leak = 0; EFI_STATUS efirc; int rc; @@ -279,24 +256,8 @@ static int efi_block_hook ( unsigned int drive, struct uri **uris, goto err_no_uris; } - /* Find an appropriate parent device handle */ - snpdev = last_opened_snpdev(); - if ( ! snpdev ) { - DBG ( "EFIBLK could not identify SNP device\n" ); - rc = -ENODEV; - goto err_no_snpdev; - } - - /* Calculate length of private data */ - prefix_len = efi_devpath_len ( snpdev->path ); - uri_len = format_uri ( uris[0], NULL, 0 ); - vendor_len = ( sizeof ( *vendor ) + - ( ( uri_len + 1 /* NUL */ ) * sizeof ( wchar_t ) ) ); - len = ( sizeof ( *block ) + uri_len + 1 /* NUL */ + prefix_len + - vendor_len + sizeof ( *end ) ); - /* Allocate and initialise structure */ - sandev = alloc_sandev ( uris, count, len ); + sandev = alloc_sandev ( uris, count, sizeof ( *block ) ); if ( ! sandev ) { rc = -ENOMEM; goto err_alloc; @@ -311,26 +272,6 @@ static int efi_block_hook ( unsigned int drive, struct uri **uris, block->block_io.ReadBlocks = efi_block_io_read; block->block_io.WriteBlocks = efi_block_io_write; block->block_io.FlushBlocks = efi_block_io_flush; - uri_buf = ( ( ( void * ) block ) + sizeof ( *block ) ); - block->path = ( ( ( void * ) uri_buf ) + uri_len + 1 /* NUL */ ); - - /* Construct device path */ - memcpy ( block->path, snpdev->path, prefix_len ); - vendor = ( ( ( void * ) block->path ) + prefix_len ); - vendor->vendor.Header.Type = HARDWARE_DEVICE_PATH; - vendor->vendor.Header.SubType = HW_VENDOR_DP; - vendor->vendor.Header.Length[0] = ( vendor_len & 0xff ); - vendor->vendor.Header.Length[1] = ( vendor_len >> 8 ); - memcpy ( &vendor->vendor.Guid, &ipxe_block_device_path_guid, - sizeof ( vendor->vendor.Guid ) ); - format_uri ( uris[0], uri_buf, ( uri_len + 1 /* NUL */ ) ); - efi_snprintf ( vendor->uri, ( uri_len + 1 /* NUL */ ), "%s", uri_buf ); - end = ( ( ( void * ) vendor ) + vendor_len ); - end->Type = END_DEVICE_PATH_TYPE; - end->SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; - end->Length[0] = sizeof ( *end ); - DBGC ( sandev, "EFIBLK %#02x has device path %s\n", - drive, efi_devpath_text ( block->path ) ); /* Register SAN device */ if ( ( rc = register_sandev ( sandev, drive, flags ) ) != 0 ) { @@ -345,6 +286,22 @@ static int efi_block_hook ( unsigned int drive, struct uri **uris, block->media.LastBlock = ( ( sandev->capacity.blocks >> sandev->blksize_shift ) - 1 ); + /* Construct device path */ + if ( ! sandev->active ) { + rc = -ENODEV; + DBGC ( sandev, "EFIBLK %#02x not active after registration\n", + drive ); + goto err_active; + } + block->path = efi_describe ( &sandev->active->block ); + if ( ! 
block->path ) { + rc = -ENODEV; + DBGC ( sandev, "EFIBLK %#02x has no device path\n", drive ); + goto err_describe; + } + DBGC ( sandev, "EFIBLK %#02x has device path %s\n", + drive, efi_devpath_text ( block->path ) ); + /* Install protocols */ if ( ( efirc = bs->InstallMultipleProtocolInterfaces ( &block->handle, @@ -362,17 +319,33 @@ static int efi_block_hook ( unsigned int drive, struct uri **uris, return drive; - bs->UninstallMultipleProtocolInterfaces ( + if ( ( efirc = bs->UninstallMultipleProtocolInterfaces ( block->handle, &efi_block_io_protocol_guid, &block->block_io, - &efi_device_path_protocol_guid, block->path, NULL ); + &efi_device_path_protocol_guid, block->path, + NULL ) ) != 0 ) { + DBGC ( sandev, "EFIBLK %#02x could not uninstall protocols: " + "%s\n", sandev->drive, strerror ( -EEFI ( efirc ) ) ); + leak = 1; + } + efi_nullify_block ( &block->block_io ); err_install: + if ( ! leak ) { + free ( block->path ); + block->path = NULL; + } + err_describe: + err_active: unregister_sandev ( sandev ); err_register: - sandev_put ( sandev ); + if ( ! leak ) + sandev_put ( sandev ); err_alloc: - err_no_snpdev: err_no_uris: + if ( leak ) { + DBGC ( sandev, "EFIBLK %#02x nullified and leaked\n", + sandev->drive ); + } return rc; } @@ -385,6 +358,8 @@ static void efi_block_unhook ( unsigned int drive ) { EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct san_device *sandev; struct efi_block_data *block; + int leak = efi_shutdown_in_progress; + EFI_STATUS efirc; /* Find SAN device */ sandev = sandev_find ( drive ); @@ -395,16 +370,36 @@ static void efi_block_unhook ( unsigned int drive ) { block = sandev->priv; /* Uninstall protocols */ - bs->UninstallMultipleProtocolInterfaces ( + if ( ( ! efi_shutdown_in_progress ) && + ( ( efirc = bs->UninstallMultipleProtocolInterfaces ( block->handle, &efi_block_io_protocol_guid, &block->block_io, - &efi_device_path_protocol_guid, block->path, NULL ); + &efi_device_path_protocol_guid, block->path, + NULL ) ) != 0 ) ) { + DBGC ( sandev, "EFIBLK %#02x could not uninstall protocols: " + "%s\n", sandev->drive, strerror ( -EEFI ( efirc ) ) ); + leak = 1; + } + efi_nullify_block ( &block->block_io ); + + /* Free device path */ + if ( ! leak ) { + free ( block->path ); + block->path = NULL; + } /* Unregister SAN device */ unregister_sandev ( sandev ); /* Drop reference to drive */ - sandev_put ( sandev ); + if ( ! leak ) + sandev_put ( sandev ); + + /* Report leakage, if applicable */ + if ( leak && ( ! 
efi_shutdown_in_progress ) ) { + DBGC ( sandev, "EFIBLK %#02x nullified and leaked\n", + sandev->drive ); + } } /** An installed ACPI table */ @@ -450,17 +445,17 @@ static int efi_block_install ( struct acpi_header *hdr ) { rc = -EEFI ( efirc ); DBGC ( acpi, "EFIBLK could not install %s: %s\n", acpi_name ( hdr->signature ), strerror ( rc ) ); - DBGC_HDA ( acpi, 0, hdr, len ); + DBGC2_HDA ( acpi, 0, hdr, len ); goto err_install; } /* Add to list of installed tables */ list_add_tail ( &installed->list, &efi_acpi_tables ); - DBGC ( acpi, "EFIBLK installed %s as ACPI table %#lx:\n", + DBGC ( acpi, "EFIBLK installed %s as ACPI table %#lx\n", acpi_name ( hdr->signature ), ( ( unsigned long ) installed->key ) ); - DBGC_HDA ( acpi, 0, hdr, len ); + DBGC2_HDA ( acpi, 0, hdr, len ); return 0; list_del ( &installed->list ); @@ -551,7 +546,7 @@ static int efi_block_boot_image ( struct san_device *sandev, EFI_HANDLE handle, } /* Check if this device is a child of our block device */ - prefix_len = efi_devpath_len ( block->path ); + prefix_len = efi_path_len ( block->path ); if ( memcmp ( path.path, block->path, prefix_len ) != 0 ) { /* Not a child device */ rc = -ENOTTY; @@ -561,7 +556,7 @@ static int efi_block_boot_image ( struct san_device *sandev, EFI_HANDLE handle, sandev->drive, efi_devpath_text ( path.path ) ); /* Construct device path for boot image */ - end = efi_devpath_end ( path.path ); + end = efi_path_end ( path.path ); prefix_len = ( ( ( void * ) end ) - ( ( void * ) path.path ) ); filepath_len = ( SIZE_OF_FILEPATH_DEVICE_PATH + ( filename ? @@ -594,11 +589,14 @@ static int efi_block_boot_image ( struct san_device *sandev, EFI_HANDLE handle, sandev->drive, efi_devpath_text ( boot_path ) ); /* Try loading boot image from this device */ + *image = NULL; if ( ( efirc = bs->LoadImage ( FALSE, efi_image_handle, boot_path, NULL, 0, image ) ) != 0 ) { rc = -EEFI ( efirc ); DBGC ( sandev, "EFIBLK %#02x could not load image: %s\n", sandev->drive, strerror ( rc ) ); + if ( efirc == EFI_SECURITY_VIOLATION ) + bs->UnloadImage ( *image ); goto err_load_image; } diff --git a/src/interface/efi/efi_bofm.c b/src/interface/efi/efi_bofm.c index 00f6a1d5c..15f3837cc 100644 --- a/src/interface/efi/efi_bofm.c +++ b/src/interface/efi/efi_bofm.c @@ -164,7 +164,7 @@ static EFI_GUID bofm2_protocol_guid = */ static int efi_bofm_supported ( EFI_HANDLE device ) { EFI_BOOT_SERVICES *bs = efi_systab->BootServices; - struct pci_device pci; + struct efi_pci_device efipci; union { IBM_BOFM_DRIVER_CONFIGURATION_PROTOCOL *bofm1; void *interface; @@ -173,11 +173,11 @@ static int efi_bofm_supported ( EFI_HANDLE device ) { int rc; /* Get PCI device information */ - if ( ( rc = efipci_info ( device, &pci ) ) != 0 ) + if ( ( rc = efipci_info ( device, &efipci ) ) != 0 ) return rc; /* Look for a BOFM driver */ - if ( ( rc = bofm_find_driver ( &pci ) ) != 0 ) { + if ( ( rc = bofm_find_driver ( &efipci.pci ) ) != 0 ) { DBGCP ( device, "EFIBOFM %s has no driver\n", efi_handle_name ( device ) ); return rc; @@ -204,7 +204,7 @@ static int efi_bofm_supported ( EFI_HANDLE device ) { } DBGC ( device, "EFIBOFM %s has driver \"%s\"\n", - efi_handle_name ( device ), pci.id->name ); + efi_handle_name ( device ), efipci.pci.id->name ); return 0; } @@ -225,7 +225,7 @@ static int efi_bofm_start ( struct efi_device *efidev ) { IBM_BOFM_DRIVER_CONFIGURATION_PROTOCOL2 *bofm2; void *interface; } bofm2; - struct pci_device pci; + struct efi_pci_device efipci; IBM_BOFM_TABLE *bofmtab; IBM_BOFM_TABLE *bofmtab2; int bofmrc; @@ -234,7 +234,7 @@ static 
int efi_bofm_start ( struct efi_device *efidev ) { /* Open PCI device, if possible */ if ( ( rc = efipci_open ( device, EFI_OPEN_PROTOCOL_GET_PROTOCOL, - &pci ) ) != 0 ) + &efipci ) ) != 0 ) goto err_open; /* Locate BOFM protocol */ @@ -274,7 +274,8 @@ static int efi_bofm_start ( struct efi_device *efidev ) { efi_handle_name ( device ) ); DBGC2_HD ( device, bofmtab2, bofmtab2->Parameters.Length ); } - bofmrc = bofm ( virt_to_user ( bofmtab2 ? bofmtab2 : bofmtab ), &pci ); + bofmrc = bofm ( virt_to_user ( bofmtab2 ? bofmtab2 : bofmtab ), + &efipci.pci ); DBGC ( device, "EFIBOFM %s status %08x\n", efi_handle_name ( device ), bofmrc ); DBGC2 ( device, "EFIBOFM %s version 1 after processing:\n", diff --git a/src/interface/efi/efi_cachedhcp.c b/src/interface/efi/efi_cachedhcp.c new file mode 100644 index 000000000..1d4b98fd6 --- /dev/null +++ b/src/interface/efi/efi_cachedhcp.c @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2021 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
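The efi_bofm.c changes above switch callers from a bare struct pci_device to the struct efi_pci_device wrapper, with the generic PCI device reached through the embedded .pci member. The following is a minimal sketch of that call pattern, not part of this patch; efi_pci_id_example() is hypothetical, and it assumes the usual vendor/device ID fields of struct pci_device.

static int efi_pci_id_example ( EFI_HANDLE device ) {
	struct efi_pci_device efipci;
	int rc;

	/* Populate PCI location and identifiers for this handle */
	if ( ( rc = efipci_info ( device, &efipci ) ) != 0 )
		return rc;

	/* Operate on the embedded generic PCI device */
	DBGC ( device, "EFIPCI %s is %04x:%04x\n",
	       efi_handle_name ( device ), efipci.pci.vendor,
	       efipci.pci.device );
	return 0;
}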
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include + +/** @file + * + * EFI cached DHCP packet + * + */ + +/** + * Record cached DHCP packet + * + * @v device Device handle + * @ret rc Return status code + */ +int efi_cachedhcp_record ( EFI_HANDLE device ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + union { + EFI_PXE_BASE_CODE_PROTOCOL *pxe; + void *interface; + } pxe; + EFI_PXE_BASE_CODE_MODE *mode; + EFI_STATUS efirc; + int rc; + + /* Look for a PXE base code instance on the image's device handle */ + if ( ( efirc = bs->OpenProtocol ( device, + &efi_pxe_base_code_protocol_guid, + &pxe.interface, efi_image_handle, + NULL, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ))!=0){ + rc = -EEFI ( efirc ); + DBGC ( device, "EFI %s has no PXE base code instance: %s\n", + efi_handle_name ( device ), strerror ( rc ) ); + goto err_open; + } + + /* Do not attempt to cache IPv6 packets */ + mode = pxe.pxe->Mode; + if ( mode->UsingIpv6 ) { + rc = -ENOTSUP; + DBGC ( device, "EFI %s has IPv6 PXE base code\n", + efi_handle_name ( device ) ); + goto err_ipv6; + } + + /* Record DHCPACK, if present */ + if ( mode->DhcpAckReceived && + ( ( rc = cachedhcp_record ( &cached_dhcpack, + virt_to_user ( &mode->DhcpAck ), + sizeof ( mode->DhcpAck ) ) ) != 0 ) ) { + DBGC ( device, "EFI %s could not record DHCPACK: %s\n", + efi_handle_name ( device ), strerror ( rc ) ); + goto err_dhcpack; + } + + /* Record ProxyDHCPOFFER, if present */ + if ( mode->ProxyOfferReceived && + ( ( rc = cachedhcp_record ( &cached_proxydhcp, + virt_to_user ( &mode->ProxyOffer ), + sizeof ( mode->ProxyOffer ) ) ) != 0)){ + DBGC ( device, "EFI %s could not record ProxyDHCPOFFER: %s\n", + efi_handle_name ( device ), strerror ( rc ) ); + goto err_proxydhcp; + } + + /* Record PxeBSACK, if present */ + if ( mode->PxeReplyReceived && + ( ( rc = cachedhcp_record ( &cached_pxebs, + virt_to_user ( &mode->PxeReply ), + sizeof ( mode->PxeReply ) ) ) != 0)){ + DBGC ( device, "EFI %s could not record PXEBSACK: %s\n", + efi_handle_name ( device ), strerror ( rc ) ); + goto err_pxebs; + } + + /* Success */ + rc = 0; + + err_pxebs: + err_proxydhcp: + err_dhcpack: + err_ipv6: + bs->CloseProtocol ( device, &efi_pxe_base_code_protocol_guid, + efi_image_handle, NULL ); + err_open: + return rc; +} diff --git a/src/interface/efi/efi_console.c b/src/interface/efi/efi_console.c index 047baed47..98ebbf3ac 100644 --- a/src/interface/efi/efi_console.c +++ b/src/interface/efi/efi_console.c @@ -54,6 +54,8 @@ FILE_LICENCE ( GPL2_OR_LATER ); #define ATTR_DEFAULT ATTR_FCOL_WHITE +#define CTRL_MASK 0x1f + /* Set default console usage if applicable */ #if ! 
( defined ( CONSOLE_EFI ) && CONSOLE_EXPLICIT ( CONSOLE_EFI ) ) #undef CONSOLE_EFI @@ -67,6 +69,9 @@ static unsigned int efi_attr = ATTR_DEFAULT; static EFI_CONSOLE_CONTROL_PROTOCOL *conctrl; EFI_REQUEST_PROTOCOL ( EFI_CONSOLE_CONTROL_PROTOCOL, &conctrl ); +/** Extended simple text input protocol, if present */ +static EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL *efi_conin_ex; + /** * Handle ANSI CUP (cursor position) * @@ -278,8 +283,9 @@ static const char * scancode_to_ansi_seq ( unsigned int scancode ) { */ static int efi_getchar ( void ) { EFI_SIMPLE_TEXT_INPUT_PROTOCOL *conin = efi_systab->ConIn; + EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL *conin_ex = efi_conin_ex; const char *ansi_seq; - EFI_INPUT_KEY key; + EFI_KEY_DATA key; EFI_STATUS efirc; int rc; @@ -288,20 +294,42 @@ static int efi_getchar ( void ) { return *(ansi_input++); /* Read key from real EFI console */ - if ( ( efirc = conin->ReadKeyStroke ( conin, &key ) ) != 0 ) { - rc = -EEFI ( efirc ); - DBG ( "EFI could not read keystroke: %s\n", strerror ( rc ) ); - return 0; + memset ( &key, 0, sizeof ( key ) ); + if ( conin_ex ) { + if ( ( efirc = conin_ex->ReadKeyStrokeEx ( conin_ex, + &key ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBG ( "EFI could not read extended keystroke: %s\n", + strerror ( rc ) ); + return 0; + } + } else { + if ( ( efirc = conin->ReadKeyStroke ( conin, + &key.Key ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBG ( "EFI could not read keystroke: %s\n", + strerror ( rc ) ); + return 0; + } + } + DBG2 ( "EFI read key stroke shift %08x toggle %02x unicode %04x " + "scancode %04x\n", key.KeyState.KeyShiftState, + key.KeyState.KeyToggleState, key.Key.UnicodeChar, + key.Key.ScanCode ); + + /* Translate Ctrl- */ + if ( ( key.KeyState.KeyShiftState & EFI_SHIFT_STATE_VALID ) && + ( key.KeyState.KeyShiftState & ( EFI_LEFT_CONTROL_PRESSED | + EFI_RIGHT_CONTROL_PRESSED ) ) ) { + key.Key.UnicodeChar &= CTRL_MASK; } - DBG2 ( "EFI read key stroke with unicode %04x scancode %04x\n", - key.UnicodeChar, key.ScanCode ); /* If key has a Unicode representation, return it */ - if ( key.UnicodeChar ) - return key.UnicodeChar; + if ( key.Key.UnicodeChar ) + return key.Key.UnicodeChar; /* Otherwise, check for a special key that we know about */ - if ( ( ansi_seq = scancode_to_ansi_seq ( key.ScanCode ) ) ) { + if ( ( ansi_seq = scancode_to_ansi_seq ( key.Key.ScanCode ) ) ) { /* Start of escape sequence: return ESC (0x1b) */ ansi_input = ansi_seq; return 0x1b; @@ -319,6 +347,8 @@ static int efi_getchar ( void ) { static int efi_iskey ( void ) { EFI_BOOT_SERVICES *bs = efi_systab->BootServices; EFI_SIMPLE_TEXT_INPUT_PROTOCOL *conin = efi_systab->ConIn; + EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL *conin_ex = efi_conin_ex; + EFI_EVENT *event; EFI_STATUS efirc; /* If we are mid-sequence, we are always ready */ @@ -326,7 +356,8 @@ static int efi_iskey ( void ) { return 1; /* Check to see if the WaitForKey event has fired */ - if ( ( efirc = bs->CheckEvent ( conin->WaitForKey ) ) == 0 ) + event = ( conin_ex ? 
conin_ex->WaitForKeyEx : conin->WaitForKey ); + if ( ( efirc = bs->CheckEvent ( event ) ) == 0 ) return 1; return 0; @@ -345,7 +376,14 @@ struct console_driver efi_console __console_driver = { * */ static void efi_console_init ( void ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; EFI_CONSOLE_CONTROL_SCREEN_MODE mode; + union { + void *interface; + EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL *wtf; + } u; + EFI_STATUS efirc; + int rc; /* On some older EFI 1.10 implementations, we must use the * (now obsolete) EFI_CONSOLE_CONTROL_PROTOCOL to switch the @@ -358,6 +396,23 @@ static void efi_console_init ( void ) { EfiConsoleControlScreenText ); } } + + /* Attempt to open the Simple Text Input Ex protocol on the + * console input handle. This is provably unsafe, but is + * apparently the expected behaviour for all UEFI + * applications. Don't ask. + */ + if ( ( efirc = bs->OpenProtocol ( efi_systab->ConsoleInHandle, + &efi_simple_text_input_ex_protocol_guid, + &u.interface, efi_image_handle, + efi_systab->ConsoleInHandle, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ) ) == 0 ) { + efi_conin_ex = u.wtf; + DBG ( "EFI using SimpleTextInputEx\n" ); + } else { + rc = -EEFI ( efirc ); + DBG ( "EFI has no SimpleTextInputEx: %s\n", strerror ( rc ) ); + } } /** diff --git a/src/interface/efi/efi_debug.c b/src/interface/efi/efi_debug.c index de9b1af55..967bb6182 100644 --- a/src/interface/efi/efi_debug.c +++ b/src/interface/efi/efi_debug.c @@ -37,7 +37,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include +#include #include #include #include @@ -189,7 +189,7 @@ static struct efi_well_known_guid efi_well_known_guids[] = { * @v guid GUID * @ret string Printable string */ -const __attribute__ (( pure )) char * efi_guid_ntoa ( EFI_GUID *guid ) { +const __attribute__ (( pure )) char * efi_guid_ntoa ( CONST EFI_GUID *guid ) { union { union uuid uuid; EFI_GUID guid; @@ -262,6 +262,28 @@ efi_open_attributes_name ( unsigned int attributes ) { return name; } +/** + * Print opened protocol information + * + * @v handle EFI handle + * @V protocol Protocol GUID + * @v opener Opened protocol information + */ +void dbg_efi_opener ( EFI_HANDLE handle, EFI_GUID *protocol, + EFI_OPEN_PROTOCOL_INFORMATION_ENTRY *opener ) { + + printf ( "HANDLE %s %s opened %dx (%s)", efi_handle_name ( handle ), + efi_guid_ntoa ( protocol ), opener->OpenCount, + efi_open_attributes_name ( opener->Attributes ) ); + printf ( " by %s", efi_handle_name ( opener->AgentHandle ) ); + if ( opener->ControllerHandle == handle ) { + printf ( "\n" ); + } else { + printf ( " for %s\n", + efi_handle_name ( opener->ControllerHandle ) ); + } +} + /** * Print list of openers of a given protocol on a given handle * @@ -271,7 +293,6 @@ efi_open_attributes_name ( unsigned int attributes ) { void dbg_efi_openers ( EFI_HANDLE handle, EFI_GUID *protocol ) { EFI_BOOT_SERVICES *bs = efi_systab->BootServices; EFI_OPEN_PROTOCOL_INFORMATION_ENTRY *openers; - EFI_OPEN_PROTOCOL_INFORMATION_ENTRY *opener; UINTN count; unsigned int i; EFI_STATUS efirc; @@ -296,20 +317,8 @@ void dbg_efi_openers ( EFI_HANDLE handle, EFI_GUID *protocol ) { } /* Dump list of openers */ - for ( i = 0 ; i < count ; i++ ) { - opener = &openers[i]; - printf ( "HANDLE %s %s opened %dx (%s)", - efi_handle_name ( handle ), - efi_guid_ntoa ( protocol ), opener->OpenCount, - efi_open_attributes_name ( opener->Attributes ) ); - printf ( " by %s", efi_handle_name ( opener->AgentHandle ) ); - if ( opener->ControllerHandle == handle ) { - printf ( "\n" ); - } else { - printf ( " for %s\n", - 
efi_handle_name ( opener->ControllerHandle ) ); - } - } + for ( i = 0 ; i < count ; i++ ) + dbg_efi_opener ( handle, protocol, &openers[i] ); /* Free list */ bs->FreePool ( openers ); @@ -365,7 +374,7 @@ void dbg_efi_protocols ( EFI_HANDLE handle ) { const __attribute__ (( pure )) char * efi_devpath_text ( EFI_DEVICE_PATH_PROTOCOL *path ) { EFI_BOOT_SERVICES *bs = efi_systab->BootServices; - static char text[256]; + static char text[512]; size_t len; CHAR16 *wtext; @@ -378,7 +387,7 @@ efi_devpath_text ( EFI_DEVICE_PATH_PROTOCOL *path ) { /* If we have no DevicePathToText protocol then use a raw hex string */ if ( ! efidpt ) { DBG ( "[No DevicePathToText]" ); - len = efi_devpath_len ( path ); + len = efi_path_len ( path ); base16_encode ( path, len, text, sizeof ( text ) ); return text; } diff --git a/src/interface/efi/efi_download.c b/src/interface/efi/efi_download.c index 1218852e2..8d12bd57c 100644 --- a/src/interface/efi/efi_download.c +++ b/src/interface/efi/efi_download.c @@ -138,8 +138,11 @@ efi_download_start ( IPXE_DOWNLOAD_PROTOCOL *This __unused, struct efi_download_file *file; int rc; + efi_snp_claim(); + file = malloc ( sizeof ( struct efi_download_file ) ); if ( file == NULL ) { + efi_snp_release(); return EFI_OUT_OF_RESOURCES; } @@ -147,10 +150,10 @@ efi_download_start ( IPXE_DOWNLOAD_PROTOCOL *This __unused, rc = xfer_open ( &file->xfer, LOCATION_URI_STRING, Url ); if ( rc ) { free ( file ); + efi_snp_release(); return EFIRC ( rc ); } - efi_snp_claim(); file->pos = 0; file->data_callback = DataCallback; file->finish_callback = FinishCallback; diff --git a/src/interface/efi/efi_driver.c b/src/interface/efi/efi_driver.c index 7be2e585d..8e537d535 100644 --- a/src/interface/efi/efi_driver.c +++ b/src/interface/efi/efi_driver.c @@ -30,7 +30,7 @@ FILE_LICENCE ( GPL2_OR_LATER ); #include #include #include -#include +#include #include /** @file @@ -39,6 +39,20 @@ FILE_LICENCE ( GPL2_OR_LATER ); * */ +/* Disambiguate the various error causes */ +#define EINFO_EEFI_CONNECT \ + __einfo_uniqify ( EINFO_EPLATFORM, 0x01, \ + "Could not connect controllers" ) +#define EINFO_EEFI_CONNECT_PROHIBITED \ + __einfo_platformify ( EINFO_EEFI_CONNECT, \ + EFI_SECURITY_VIOLATION, \ + "Connecting controllers prohibited by " \ + "security policy" ) +#define EEFI_CONNECT_PROHIBITED \ + __einfo_error ( EINFO_EEFI_CONNECT_PROHIBITED ) +#define EEFI_CONNECT( efirc ) EPLATFORM ( EINFO_EEFI_CONNECT, efirc, \ + EEFI_CONNECT_PROHIBITED ) + static EFI_DRIVER_BINDING_PROTOCOL efi_driver_binding; /** List of controlled EFI devices */ @@ -142,13 +156,13 @@ efi_driver_start ( EFI_DRIVER_BINDING_PROTOCOL *driver __unused, EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct efi_driver *efidrv; struct efi_device *efidev; + struct efi_saved_tpl tpl; union { EFI_DEVICE_PATH_PROTOCOL *path; void *interface; } path; EFI_DEVICE_PATH_PROTOCOL *path_end; size_t path_len; - EFI_TPL saved_tpl; EFI_STATUS efirc; int rc; @@ -167,7 +181,7 @@ efi_driver_start ( EFI_DRIVER_BINDING_PROTOCOL *driver __unused, } /* Raise TPL */ - saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + efi_raise_tpl ( &tpl ); /* Do nothing if we are currently disconnecting drivers */ if ( efi_driver_disconnecting ) { @@ -188,7 +202,7 @@ efi_driver_start ( EFI_DRIVER_BINDING_PROTOCOL *driver __unused, efi_handle_name ( device ), strerror ( rc ) ); goto err_open_path; } - path_len = ( efi_devpath_len ( path.path ) + sizeof ( *path_end ) ); + path_len = ( efi_path_len ( path.path ) + sizeof ( *path_end ) ); /* Allocate and initialise structure */ efidev = 
zalloc ( sizeof ( *efidev ) + path_len ); @@ -222,7 +236,7 @@ efi_driver_start ( EFI_DRIVER_BINDING_PROTOCOL *driver __unused, DBGC ( device, "EFIDRV %s using driver \"%s\"\n", efi_handle_name ( device ), efidev->driver->name ); - bs->RestoreTPL ( saved_tpl ); + efi_restore_tpl ( &tpl ); return 0; } DBGC ( device, "EFIDRV %s could not start driver \"%s\": %s\n", @@ -240,7 +254,7 @@ efi_driver_start ( EFI_DRIVER_BINDING_PROTOCOL *driver __unused, } err_open_path: err_disconnecting: - bs->RestoreTPL ( saved_tpl ); + efi_restore_tpl ( &tpl ); err_already_started: return efirc; } @@ -259,10 +273,9 @@ static EFI_STATUS EFIAPI efi_driver_stop ( EFI_DRIVER_BINDING_PROTOCOL *driver __unused, EFI_HANDLE device, UINTN num_children, EFI_HANDLE *children ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct efi_driver *efidrv; struct efi_device *efidev; - EFI_TPL saved_tpl; + struct efi_saved_tpl tpl; UINTN i; DBGC ( device, "EFIDRV %s DRIVER_STOP", efi_handle_name ( device ) ); @@ -281,7 +294,7 @@ efi_driver_stop ( EFI_DRIVER_BINDING_PROTOCOL *driver __unused, } /* Raise TPL */ - saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + efi_raise_tpl ( &tpl ); /* Stop this device */ efidrv = efidev->driver; @@ -290,7 +303,7 @@ efi_driver_stop ( EFI_DRIVER_BINDING_PROTOCOL *driver __unused, list_del ( &efidev->dev.siblings ); free ( efidev ); - bs->RestoreTPL ( saved_tpl ); + efi_restore_tpl ( &tpl ); return 0; } @@ -456,11 +469,20 @@ static int efi_driver_connect ( EFI_HANDLE device ) { DBGC ( device, "EFIDRV %s connecting new drivers\n", efi_handle_name ( device ) ); if ( ( efirc = bs->ConnectController ( device, drivers, NULL, - FALSE ) ) != 0 ) { - rc = -EEFI ( efirc ); + TRUE ) ) != 0 ) { + rc = -EEFI_CONNECT ( efirc ); DBGC ( device, "EFIDRV %s could not connect new drivers: " "%s\n", efi_handle_name ( device ), strerror ( rc ) ); - return rc; + DBGC ( device, "EFIDRV %s connecting driver directly\n", + efi_handle_name ( device ) ); + if ( ( efirc = efi_driver_start ( &efi_driver_binding, device, + NULL ) ) != 0 ) { + rc = -EEFI_CONNECT ( efirc ); + DBGC ( device, "EFIDRV %s could not connect driver " + "directly: %s\n", efi_handle_name ( device ), + strerror ( rc ) ); + return rc; + } } DBGC2 ( device, "EFIDRV %s after connecting:\n", efi_handle_name ( device ) ); @@ -497,7 +519,7 @@ static int efi_driver_reconnect ( EFI_HANDLE device ) { EFI_BOOT_SERVICES *bs = efi_systab->BootServices; /* Reconnect any available driver */ - bs->ConnectController ( device, NULL, NULL, FALSE ); + bs->ConnectController ( device, NULL, NULL, TRUE ); return 0; } diff --git a/src/interface/efi/efi_entropy.c b/src/interface/efi/efi_entropy.c index 2a2fc9054..70cd06293 100644 --- a/src/interface/efi/efi_entropy.c +++ b/src/interface/efi/efi_entropy.c @@ -79,8 +79,8 @@ static int efi_entropy_enable ( void ) { DBGC ( &tick, "ENTROPY %s RNG protocol\n", ( efirng ? 
"has" : "has no" ) ); - /* Drop to TPL_APPLICATION to allow timer tick event to take place */ - bs->RestoreTPL ( TPL_APPLICATION ); + /* Drop to external TPL to allow timer tick event to take place */ + bs->RestoreTPL ( efi_external_tpl ); /* Create timer tick event */ if ( ( efirc = bs->CreateEvent ( EVT_TIMER, TPL_NOTIFY, NULL, NULL, @@ -179,6 +179,7 @@ static int efi_get_noise_ticks ( noise_sample_t *noise ) { * @ret rc Return status code */ static int efi_get_noise_rng ( noise_sample_t *noise ) { + static uint8_t prev[EFI_ENTROPY_RNG_LEN]; uint8_t buf[EFI_ENTROPY_RNG_LEN]; EFI_STATUS efirc; int rc; @@ -196,6 +197,17 @@ static int efi_get_noise_rng ( noise_sample_t *noise ) { return rc; } + /* Fail (and permanently disable the EFI RNG) if we get + * consecutive identical results. + */ + if ( memcmp ( buf, prev, sizeof ( buf ) ) == 0 ) { + DBGC ( &tick, "ENTROPY detected broken EFI RNG:\n" ); + DBGC_HDA ( &tick, 0, buf, sizeof ( buf ) ); + efirng = NULL; + return -EIO; + } + memcpy ( prev, buf, sizeof ( prev ) ); + /* Reduce random bytes to a single noise sample. This seems * like overkill, but we have no way of knowing how much * entropy is actually present in the bytes returned by the diff --git a/src/interface/efi/efi_file.c b/src/interface/efi/efi_file.c index 52de0987c..fc64b369c 100644 --- a/src/interface/efi/efi_file.c +++ b/src/interface/efi/efi_file.c @@ -38,6 +38,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include #include #include #include @@ -50,17 +51,54 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); /** EFI media ID */ #define EFI_MEDIA_ID_MAGIC 0x69505845 -/** An image exposed as an EFI file */ +/** An EFI virtual file reader */ +struct efi_file_reader { + /** EFI file */ + struct efi_file *file; + /** Position within virtual file */ + size_t pos; + /** Output data buffer */ + void *data; + /** Length of output data buffer */ + size_t len; +}; + +/** An EFI file */ struct efi_file { + /** Reference count */ + struct refcnt refcnt; /** EFI file protocol */ EFI_FILE_PROTOCOL file; - /** Image */ + /** Image (if any) */ struct image *image; + /** Filename */ + const char *name; /** Current file position */ size_t pos; + /** + * Read from file + * + * @v reader File reader + * @ret len Length read + */ + size_t ( * read ) ( struct efi_file_reader *reader ); }; static struct efi_file efi_file_root; +static struct efi_file efi_file_initrd; + +/** + * Free EFI file + * + * @v refcnt Reference count + */ +static void efi_file_free ( struct refcnt *refcnt ) { + struct efi_file *file = + container_of ( refcnt, struct efi_file, refcnt ); + + image_put ( file->image ); + free ( file ); +} /** * Get EFI file name (for debugging) @@ -70,28 +108,201 @@ static struct efi_file efi_file_root; */ static const char * efi_file_name ( struct efi_file *file ) { - return ( file->image ? file->image->name : "" ); + return ( file == &efi_file_root ? 
"" : file->name ); } /** * Find EFI file image * - * @v wname Filename + * @v name Filename * @ret image Image, or NULL */ -static struct image * efi_file_find ( const CHAR16 *wname ) { - char name[ wcslen ( wname ) + 1 /* NUL */ ]; +static struct image * efi_file_find ( const char *name ) { struct image *image; /* Find image */ - snprintf ( name, sizeof ( name ), "%ls", wname ); list_for_each_entry ( image, &images, list ) { if ( strcasecmp ( image->name, name ) == 0 ) return image; } return NULL; +} +/** + * Get length of EFI file + * + * @v file EFI file + * @ret len Length of file + */ +static size_t efi_file_len ( struct efi_file *file ) { + struct efi_file_reader reader; + + /* If this is the root directory, then treat as length zero */ + if ( ! file->read ) + return 0; + + /* Initialise reader */ + reader.file = file; + reader.pos = 0; + reader.data = NULL; + reader.len = 0; + + /* Perform dummy read to determine file length */ + file->read ( &reader ); + + return reader.pos; +} + +/** + * Read chunk of EFI file + * + * @v reader EFI file reader + * @v data Input data, or UNULL to zero-fill + * @v len Length of input data + * @ret len Length of output data + */ +static size_t efi_file_read_chunk ( struct efi_file_reader *reader, + userptr_t data, size_t len ) { + struct efi_file *file = reader->file; + size_t offset; + + /* Calculate offset into input data */ + offset = ( file->pos - reader->pos ); + + /* Consume input data range */ + reader->pos += len; + + /* Calculate output length */ + if ( offset < len ) { + len -= offset; + } else { + len = 0; + } + if ( len > reader->len ) + len = reader->len; + + /* Copy or zero output data */ + if ( data ) { + copy_from_user ( reader->data, data, offset, len ); + } else { + memset ( reader->data, 0, len ); + } + + /* Consume output buffer */ + file->pos += len; + reader->data += len; + reader->len -= len; + + return len; +} + +/** + * Read from image-backed file + * + * @v reader EFI file reader + * @ret len Length read + */ +static size_t efi_file_read_image ( struct efi_file_reader *reader ) { + struct efi_file *file = reader->file; + struct image *image = file->image; + + /* Read from file */ + return efi_file_read_chunk ( reader, image->data, image->len ); +} + +/** + * Read from magic initrd file + * + * @v reader EFI file reader + * @ret len Length read + */ +static size_t efi_file_read_initrd ( struct efi_file_reader *reader ) { + struct efi_file *file = reader->file; + struct cpio_header cpio; + struct image *image; + const char *name; + size_t pad_len; + size_t cpio_len; + size_t name_len; + size_t len; + + /* Read from file */ + len = 0; + for_each_image ( image ) { + + /* Ignore currently executing image */ + if ( image == current_image ) + continue; + + /* Pad to alignment boundary */ + pad_len = ( ( -reader->pos ) & ( INITRD_ALIGN - 1 ) ); + if ( pad_len ) { + DBGC ( file, "EFIFILE %s [%#08zx,%#08zx) pad\n", + efi_file_name ( file ), reader->pos, + ( reader->pos + pad_len ) ); + } + len += efi_file_read_chunk ( reader, UNULL, pad_len ); + + /* Read CPIO header, if applicable */ + cpio_len = cpio_header ( image, &cpio ); + if ( cpio_len ) { + name = cpio_name ( image ); + name_len = cpio_name_len ( image ); + pad_len = ( cpio_len - sizeof ( cpio ) - name_len ); + DBGC ( file, "EFIFILE %s [%#08zx,%#08zx) %s header\n", + efi_file_name ( file ), reader->pos, + ( reader->pos + cpio_len ), image->name ); + len += efi_file_read_chunk ( reader, + virt_to_user ( &cpio ), + sizeof ( cpio ) ); + len += efi_file_read_chunk ( reader, + 
virt_to_user ( name ), + name_len ); + len += efi_file_read_chunk ( reader, UNULL, pad_len ); + } + + /* Read file data */ + DBGC ( file, "EFIFILE %s [%#08zx,%#08zx) %s\n", + efi_file_name ( file ), reader->pos, + ( reader->pos + image->len ), image->name ); + len += efi_file_read_chunk ( reader, image->data, image->len ); + } + + return len; +} + +/** + * Open fixed file + * + * @v file EFI file + * @v new New EFI file + * @ret efirc EFI status code + */ +static EFI_STATUS efi_file_open_fixed ( struct efi_file *file, + EFI_FILE_PROTOCOL **new ) { + + /* Increment reference count */ + ref_get ( &file->refcnt ); + + /* Return opened file */ + *new = &file->file; + + DBGC ( file, "EFIFILE %s opened\n", efi_file_name ( file ) ); + return 0; +} + +/** + * Associate file with image + * + * @v file EFI file + * @v image Image + */ +static void efi_file_image ( struct efi_file *file, struct image *image ) { + + file->image = image; + file->name = image->name; + file->read = efi_file_read_image; } /** @@ -106,50 +317,60 @@ static struct image * efi_file_find ( const CHAR16 *wname ) { */ static EFI_STATUS EFIAPI efi_file_open ( EFI_FILE_PROTOCOL *this, EFI_FILE_PROTOCOL **new, - CHAR16 *wname, UINT64 mode __unused, - UINT64 attributes __unused ) { + CHAR16 *wname, UINT64 mode, UINT64 attributes __unused ) { struct efi_file *file = container_of ( this, struct efi_file, file ); + char buf[ wcslen ( wname ) + 1 /* NUL */ ]; struct efi_file *new_file; struct image *image; + char *name; + + /* Convert name to ASCII */ + snprintf ( buf, sizeof ( buf ), "%ls", wname ); + name = buf; /* Initial '\' indicates opening from the root directory */ - while ( *wname == L'\\' ) { + while ( *name == '\\' ) { file = &efi_file_root; - wname++; + name++; } /* Allow root directory itself to be opened */ - if ( ( wname[0] == L'\0' ) || ( wname[0] == L'.' ) ) { - *new = &efi_file_root.file; - return 0; - } + if ( ( name[0] == '\0' ) || ( name[0] == '.' ) ) + return efi_file_open_fixed ( &efi_file_root, new ); /* Fail unless opening from the root */ - if ( file->image ) { + if ( file != &efi_file_root ) { DBGC ( file, "EFIFILE %s is not a directory\n", efi_file_name ( file ) ); return EFI_NOT_FOUND; } - /* Identify image */ - image = efi_file_find ( wname ); - if ( ! image ) { - DBGC ( file, "EFIFILE \"%ls\" does not exist\n", wname ); - return EFI_NOT_FOUND; - } - /* Fail unless opening read-only */ if ( mode != EFI_FILE_MODE_READ ) { DBGC ( file, "EFIFILE %s cannot be opened in mode %#08llx\n", - image->name, mode ); + name, mode ); return EFI_WRITE_PROTECTED; } + /* Allow magic initrd to be opened */ + if ( strcasecmp ( name, efi_file_initrd.name ) == 0 ) + return efi_file_open_fixed ( &efi_file_initrd, new ); + + /* Identify image */ + image = efi_file_find ( name ); + if ( ! image ) { + DBGC ( file, "EFIFILE %s does not exist\n", name ); + return EFI_NOT_FOUND; + } + /* Allocate and initialise file */ new_file = zalloc ( sizeof ( *new_file ) ); + if ( ! 
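
efi_file_read_initrd() above synthesises the magic initrd as a stream of CPIO members, one per loaded image: padding up to the INITRD_ALIGN boundary, then (for images that need one) the CPIO header, the member name, the header's own padding, and finally the image data. The pad-to-alignment arithmetic it relies on reduces to the following (a small sketch; the helper name is illustrative):

#include <stddef.h>

/* Distance from 'pos' up to the next multiple of 'align' (a power of
 * two); zero when 'pos' is already aligned.
 */
static size_t pad_to ( size_t pos, size_t align ) {
	return ( ( -pos ) & ( align - 1 ) );
}

/* e.g. pad_to ( 6, 4 ) == 2, so the next CPIO member starts at offset 8 */
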
new_file ) + return EFI_OUT_OF_RESOURCES; + ref_init ( &file->refcnt, efi_file_free ); memcpy ( &new_file->file, &efi_file_root.file, sizeof ( new_file->file ) ); - new_file->image = image_get ( image ); + efi_file_image ( new_file, image_get ( image ) ); *new = &new_file->file; DBGC ( new_file, "EFIFILE %s opened\n", efi_file_name ( new_file ) ); @@ -165,14 +386,9 @@ efi_file_open ( EFI_FILE_PROTOCOL *this, EFI_FILE_PROTOCOL **new, static EFI_STATUS EFIAPI efi_file_close ( EFI_FILE_PROTOCOL *this ) { struct efi_file *file = container_of ( this, struct efi_file, file ); - /* Do nothing if this is the root */ - if ( ! file->image ) - return 0; - /* Close file */ DBGC ( file, "EFIFILE %s closed\n", efi_file_name ( file ) ); - image_put ( file->image ); - free ( file ); + ref_put ( &file->refcnt ); return 0; } @@ -229,30 +445,29 @@ static EFI_STATUS efi_file_varlen ( UINT64 *base, size_t base_len, /** * Return file information structure * - * @v image Image, or NULL for the root directory + * @v file EFI file * @v len Length of data buffer * @v data Data buffer * @ret efirc EFI status code */ -static EFI_STATUS efi_file_info ( struct image *image, UINTN *len, +static EFI_STATUS efi_file_info ( struct efi_file *file, UINTN *len, VOID *data ) { EFI_FILE_INFO info; - const char *name; + size_t file_len; + + /* Get file length */ + file_len = efi_file_len ( file ); /* Populate file information */ memset ( &info, 0, sizeof ( info ) ); - if ( image ) { - info.FileSize = image->len; - info.PhysicalSize = image->len; - info.Attribute = EFI_FILE_READ_ONLY; - name = image->name; - } else { - info.Attribute = ( EFI_FILE_READ_ONLY | EFI_FILE_DIRECTORY ); - name = ""; - } + info.FileSize = file_len; + info.PhysicalSize = file_len; + info.Attribute = EFI_FILE_READ_ONLY; + if ( file == &efi_file_root ) + info.Attribute |= EFI_FILE_DIRECTORY; - return efi_file_varlen ( &info.Size, SIZE_OF_EFI_FILE_INFO, name, - len, data ); + return efi_file_varlen ( &info.Size, SIZE_OF_EFI_FILE_INFO, + file->name, len, data ); } /** @@ -266,14 +481,16 @@ static EFI_STATUS efi_file_info ( struct image *image, UINTN *len, static EFI_STATUS efi_file_read_dir ( struct efi_file *file, UINTN *len, VOID *data ) { EFI_STATUS efirc; + struct efi_file entry; struct image *image; unsigned int index; - /* Construct directory entry at current position */ + /* Construct directory entries for image-backed files */ index = file->pos; for_each_image ( image ) { if ( index-- == 0 ) { - efirc = efi_file_info ( image, len, data ); + efi_file_image ( &entry, image ); + efirc = efi_file_info ( &entry, len, data ); if ( efirc == 0 ) file->pos++; return efirc; @@ -296,21 +513,25 @@ static EFI_STATUS efi_file_read_dir ( struct efi_file *file, UINTN *len, static EFI_STATUS EFIAPI efi_file_read ( EFI_FILE_PROTOCOL *this, UINTN *len, VOID *data ) { struct efi_file *file = container_of ( this, struct efi_file, file ); - size_t remaining; + struct efi_file_reader reader; + size_t pos = file->pos; /* If this is the root directory, then construct a directory entry */ - if ( ! file->image ) + if ( ! 
file->read ) return efi_file_read_dir ( file, len, data ); + /* Initialise reader */ + reader.file = file; + reader.pos = 0; + reader.data = data; + reader.len = *len; + /* Read from the file */ - remaining = ( file->image->len - file->pos ); - if ( *len > remaining ) - *len = remaining; DBGC ( file, "EFIFILE %s read [%#08zx,%#08zx)\n", - efi_file_name ( file ), file->pos, - ( ( size_t ) ( file->pos + *len ) ) ); - copy_from_user ( data, file->image->data, file->pos, *len ); - file->pos += *len; + efi_file_name ( file ), pos, file->pos ); + *len = file->read ( &reader ); + assert ( ( pos + *len ) == file->pos ); + return 0; } @@ -342,24 +563,21 @@ static EFI_STATUS EFIAPI efi_file_write ( EFI_FILE_PROTOCOL *this, static EFI_STATUS EFIAPI efi_file_set_position ( EFI_FILE_PROTOCOL *this, UINT64 position ) { struct efi_file *file = container_of ( this, struct efi_file, file ); + size_t len; - /* If this is the root directory, reset to the start */ - if ( ! file->image ) { - DBGC ( file, "EFIFILE root directory rewound\n" ); - file->pos = 0; - return 0; - } + /* Get file length */ + len = efi_file_len ( file ); /* Check for the magic end-of-file value */ if ( position == 0xffffffffffffffffULL ) - position = file->image->len; + position = len; /* Fail if we attempt to seek past the end of the file (since * we do not support writes). */ - if ( position > file->image->len ) { + if ( position > len ) { DBGC ( file, "EFIFILE %s cannot seek to %#08llx of %#08zx\n", - efi_file_name ( file ), position, file->image->len ); + efi_file_name ( file ), position, len ); return EFI_UNSUPPORTED; } @@ -408,7 +626,7 @@ static EFI_STATUS EFIAPI efi_file_get_info ( EFI_FILE_PROTOCOL *this, /* Get file information */ DBGC ( file, "EFIFILE %s get file information\n", efi_file_name ( file ) ); - return efi_file_info ( file->image, len, data ); + return efi_file_info ( file, len, data ); } else if ( memcmp ( type, &efi_file_system_info_id, sizeof ( *type ) ) == 0 ) { @@ -468,6 +686,7 @@ static EFI_STATUS EFIAPI efi_file_flush ( EFI_FILE_PROTOCOL *this ) { /** Root directory */ static struct efi_file efi_file_root = { + .refcnt = REF_INIT ( ref_no_free ), .file = { .Revision = EFI_FILE_PROTOCOL_REVISION, .Open = efi_file_open, @@ -482,6 +701,28 @@ static struct efi_file efi_file_root = { .Flush = efi_file_flush, }, .image = NULL, + .name = "", +}; + +/** Magic initrd file */ +static struct efi_file efi_file_initrd = { + .refcnt = REF_INIT ( ref_no_free ), + .file = { + .Revision = EFI_FILE_PROTOCOL_REVISION, + .Open = efi_file_open, + .Close = efi_file_close, + .Delete = efi_file_delete, + .Read = efi_file_read, + .Write = efi_file_write, + .GetPosition = efi_file_get_position, + .SetPosition = efi_file_set_position, + .GetInfo = efi_file_get_info, + .SetInfo = efi_file_set_info, + .Flush = efi_file_flush, + }, + .image = NULL, + .name = "initrd.magic", + .read = efi_file_read_initrd, }; /** @@ -496,8 +737,7 @@ efi_file_open_volume ( EFI_SIMPLE_FILE_SYSTEM_PROTOCOL *filesystem __unused, EFI_FILE_PROTOCOL **file ) { DBGC ( &efi_file_root, "EFIFILE open volume\n" ); - *file = &efi_file_root.file; - return 0; + return efi_file_open_fixed ( &efi_file_root, file ); } /** EFI simple file system protocol */ diff --git a/src/interface/efi/efi_init.c b/src/interface/efi/efi_init.c index ed9707f2c..b7cac16e5 100644 --- a/src/interface/efi/efi_init.c +++ b/src/interface/efi/efi_init.c @@ -21,9 +21,12 @@ FILE_LICENCE ( GPL2_OR_LATER ); #include #include +#include #include +#include #include #include +#include #include /** Image 
handle passed to entry point */ @@ -32,6 +35,9 @@ EFI_HANDLE efi_image_handle; /** Loaded image protocol for this image */ EFI_LOADED_IMAGE_PROTOCOL *efi_loaded_image; +/** Device path for the loaded image's device handle */ +EFI_DEVICE_PATH_PROTOCOL *efi_loaded_image_path; + /** System table passed to entry point * * We construct the symbol name efi_systab via the PLATFORM macro. @@ -41,12 +47,25 @@ EFI_LOADED_IMAGE_PROTOCOL *efi_loaded_image; */ EFI_SYSTEM_TABLE * _C2 ( PLATFORM, _systab ); +/** External task priority level */ +EFI_TPL efi_external_tpl = TPL_APPLICATION; + /** EFI shutdown is in progress */ int efi_shutdown_in_progress; /** Event used to signal shutdown */ static EFI_EVENT efi_shutdown_event; +/** Stack cookie */ +unsigned long __stack_chk_guard; + +/** Exit function + * + * Cached to minimise external dependencies when a stack check + * failure is triggered. + */ +static EFI_EXIT efi_exit; + /* Forward declarations */ static EFI_STATUS EFIAPI efi_unload ( EFI_HANDLE image_handle ); @@ -87,6 +106,46 @@ static void * efi_find_table ( EFI_GUID *guid ) { return NULL; } +/** + * Construct a stack cookie value + * + * @v handle Image handle + * @ret cookie Stack cookie + */ +__attribute__ (( noinline )) unsigned long +efi_stack_cookie ( EFI_HANDLE handle ) { + unsigned long cookie = 0; + unsigned int rotation = ( 8 * sizeof ( cookie ) / 4 ); + + /* There is no viable source of entropy available at this + * point. Construct a value that is at least likely to vary + * between platforms and invocations. + */ + cookie ^= ( ( unsigned long ) handle ); + cookie = roll ( cookie, rotation ); + cookie ^= ( ( unsigned long ) &handle ); + cookie = roll ( cookie, rotation ); + cookie ^= profile_timestamp(); + cookie = roll ( cookie, rotation ); + cookie ^= build_id; + + /* Ensure that the value contains a NUL byte, to act as a + * runaway string terminator. Construct the NUL using a shift + * rather than a mask, to avoid losing valuable entropy in the + * lower-order bits. + */ + cookie <<= 8; + + /* Ensure that the NUL byte is placed at the bottom of the + * stack cookie, to avoid potential disclosure via an + * unterminated string. 
+ */ + if ( __BYTE_ORDER == __BIG_ENDIAN ) + cookie >>= 8; + + return cookie; +} + /** * Initialise EFI environment * @@ -100,6 +159,9 @@ EFI_STATUS efi_init ( EFI_HANDLE image_handle, struct efi_protocol *prot; struct efi_config_table *tab; void *loaded_image; + void *device_path; + void *device_path_copy; + size_t device_path_len; EFI_STATUS efirc; int rc; @@ -130,6 +192,9 @@ EFI_STATUS efi_init ( EFI_HANDLE image_handle, DBGC ( systab, "EFI handle %p systab %p\n", image_handle, systab ); bs = systab->BootServices; + /* Store abort function pointer */ + efi_exit = bs->Exit; + /* Look up used protocols */ for_each_table_entry ( prot, EFI_PROTOCOLS ) { if ( ( efirc = bs->LocateProtocol ( &prot->guid, NULL, @@ -175,6 +240,33 @@ EFI_STATUS efi_init ( EFI_HANDLE image_handle, DBGC ( systab, "EFI image base address %p\n", efi_loaded_image->ImageBase ); + /* Get loaded image's device handle's device path */ + if ( ( efirc = bs->OpenProtocol ( efi_loaded_image->DeviceHandle, + &efi_device_path_protocol_guid, + &device_path, image_handle, NULL, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( systab, "EFI could not get loaded image's device path: " + "%s", strerror ( rc ) ); + goto err_no_device_path; + } + + /* Make a copy of the loaded image's device handle's device + * path, since the device handle itself may become invalidated + * when we load our own drivers. + */ + device_path_len = ( efi_path_len ( device_path ) + + sizeof ( EFI_DEVICE_PATH_PROTOCOL ) ); + if ( ( efirc = bs->AllocatePool ( EfiBootServicesData, device_path_len, + &device_path_copy ) ) != 0 ) { + rc = -EEFI ( efirc ); + goto err_alloc_device_path; + } + memcpy ( device_path_copy, device_path, device_path_len ); + efi_loaded_image_path = device_path_copy; + DBGC ( systab, "EFI image device path %s\n", + efi_devpath_text ( efi_loaded_image_path ) ); + /* EFI is perfectly capable of gracefully shutting down any * loaded devices if it decides to fall back to a legacy boot. 
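
To make the NUL-byte trick in efi_stack_cookie() above concrete, here is the arithmetic for an illustrative starting value, assuming a 64-bit unsigned long and glibc-style __BYTE_ORDER macros (only the shifts are taken from the patch; the value and function name are made up):

#include <endian.h>	/* assumed source of __BYTE_ORDER / __BIG_ENDIAN */

static unsigned long example_cookie ( void ) {
	unsigned long cookie = 0x123456789abcdef0UL;	/* illustrative */

	cookie <<= 8;	/* 0x3456789abcdef000: least significant byte is NUL */
	if ( __BYTE_ORDER == __BIG_ENDIAN )
		cookie >>= 8;	/* 0x003456789abcdef0: MSB is NUL instead */

	/* Either way the zero byte occupies the cookie's lowest-addressed
	 * byte in memory, so a runaway string that runs into the cookie
	 * terminates at its very first byte.
	 */
	return cookie;
}
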
* For no particularly comprehensible reason, it doesn't @@ -206,6 +298,9 @@ EFI_STATUS efi_init ( EFI_HANDLE image_handle, err_driver_install: bs->CloseEvent ( efi_shutdown_event ); err_create_event: + bs->FreePool ( efi_loaded_image_path ); + err_alloc_device_path: + err_no_device_path: err_no_loaded_image: err_missing_table: err_missing_protocol: @@ -236,7 +331,67 @@ static EFI_STATUS EFIAPI efi_unload ( EFI_HANDLE image_handle __unused ) { /* Uninstall exit boot services event */ bs->CloseEvent ( efi_shutdown_event ); + /* Free copy of loaded image's device handle's device path */ + bs->FreePool ( efi_loaded_image_path ); + DBGC ( systab, "EFI image unloaded\n" ); return 0; } + +/** + * Abort on stack check failure + * + */ +__attribute__ (( noreturn )) void __stack_chk_fail ( void ) { + EFI_STATUS efirc; + int rc; + + /* Report failure (when debugging) */ + DBGC ( efi_systab, "EFI stack check failed (cookie %#lx); aborting\n", + __stack_chk_guard ); + + /* Attempt to exit cleanly with an error status */ + if ( efi_exit ) { + efirc = efi_exit ( efi_image_handle, EFI_COMPROMISED_DATA, + 0, NULL ); + rc = -EEFI ( efirc ); + DBGC ( efi_systab, "EFI stack check exit failed: %s\n", + strerror ( rc ) ); + } + + /* If the exit fails for any reason, lock the system */ + while ( 1 ) {} + +} + +/** + * Raise task priority level to TPL_CALLBACK + * + * @v tpl Saved TPL + */ +void efi_raise_tpl ( struct efi_saved_tpl *tpl ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + + /* Record current external TPL */ + tpl->previous = efi_external_tpl; + + /* Raise TPL and record previous TPL as new external TPL */ + tpl->current = bs->RaiseTPL ( TPL_CALLBACK ); + efi_external_tpl = tpl->current; +} + +/** + * Restore task priority level + * + * @v tpl Saved TPL + */ +void efi_restore_tpl ( struct efi_saved_tpl *tpl ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + + /* Restore external TPL */ + efi_external_tpl = tpl->previous; + + /* Restore TPL */ + bs->RestoreTPL ( tpl->current ); +} diff --git a/src/interface/efi/efi_local.c b/src/interface/efi/efi_local.c index bd010ad2e..4ebca5726 100644 --- a/src/interface/efi/efi_local.c +++ b/src/interface/efi/efi_local.c @@ -37,7 +37,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include +#include #include #include #include @@ -307,6 +307,7 @@ static int efi_local_open_volume ( struct efi_local *local, EFI_GUID *protocol = &efi_simple_file_system_protocol_guid; int ( * check ) ( struct efi_local *local, EFI_HANDLE device, EFI_FILE_PROTOCOL *root, const char *volume ); + EFI_DEVICE_PATH_PROTOCOL *path; EFI_FILE_PROTOCOL *root; EFI_HANDLE *handles; EFI_HANDLE device; @@ -328,8 +329,18 @@ static int efi_local_open_volume ( struct efi_local *local, } check = efi_local_check_volume_name; } else { - /* Use our loaded image's device handle */ - handles = &efi_loaded_image->DeviceHandle; + /* Locate filesystem from which we were loaded */ + path = efi_loaded_image_path; + if ( ( efirc = bs->LocateDevicePath ( protocol, &path, + &device ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( local, "LOCAL %p could not locate file system " + "on %s: %s\n", local, + efi_devpath_text ( efi_loaded_image_path ), + strerror ( rc ) ); + return rc; + } + handles = &device; num_handles = 1; check = NULL; } @@ -414,7 +425,7 @@ static int efi_local_open_resolved ( struct efi_local *local, static int efi_local_open_path ( struct efi_local *local, const char *path ) { FILEPATH_DEVICE_PATH *fp = container_of ( efi_loaded_image->FilePath, 
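
efi_raise_tpl() and efi_restore_tpl() above centralise the TPL bookkeeping that efi_driver_start() and efi_driver_stop() previously did by hand, and additionally track the externally imposed TPL in efi_external_tpl. A sketch of the intended bracketing follows; note that the struct efi_saved_tpl definition shown here is inferred from the usage in this patch, not copied from an iPXE header:

/* Inferred from usage; the real definition lives in a header that is
 * not part of this hunk.
 */
struct efi_saved_tpl {
	/* TPL in force when we were entered (as returned by RaiseTPL) */
	EFI_TPL current;
	/* efi_external_tpl value to put back on exit */
	EFI_TPL previous;
};

/* Typical bracketing of an externally invoked entry point, as in
 * efi_driver_start() above:
 */
static void example_entry_point ( void ) {
	struct efi_saved_tpl tpl;

	/* Raise to TPL_CALLBACK, remembering both TPL values */
	efi_raise_tpl ( &tpl );

	/* ... work performed at TPL_CALLBACK ... */

	/* Restore the caller's TPL and the recorded external TPL */
	efi_restore_tpl ( &tpl );
}
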
FILEPATH_DEVICE_PATH, Header); - size_t fp_len = ( fp ? efi_devpath_len ( &fp->Header ) : 0 ); + size_t fp_len = ( fp ? efi_path_len ( &fp->Header ) : 0 ); char base[ fp_len / 2 /* Cannot exceed this length */ ]; size_t remaining = sizeof ( base ); size_t len; @@ -537,8 +548,8 @@ static int efi_local_open ( struct interface *xfer, struct uri *uri ) { } ref_init ( &local->refcnt, NULL ); intf_init ( &local->xfer, &efi_local_xfer_desc, &local->refcnt ); - process_init ( &local->process, &efi_local_process_desc, - &local->refcnt ); + process_init_stopped ( &local->process, &efi_local_process_desc, + &local->refcnt ); /* Open specified volume */ if ( ( rc = efi_local_open_volume ( local, volume ) ) != 0 ) @@ -552,6 +563,9 @@ static int efi_local_open ( struct interface *xfer, struct uri *uri ) { if ( ( rc = efi_local_len ( local ) ) != 0 ) goto err_len; + /* Start download process */ + process_add ( &local->process ); + /* Attach to parent interface, mortalise self, and return */ intf_plug_plug ( &local->xfer, xfer ); ref_put ( &local->refcnt ); diff --git a/src/interface/efi/efi_null.c b/src/interface/efi/efi_null.c new file mode 100644 index 000000000..29ca5b9b6 --- /dev/null +++ b/src/interface/efi/efi_null.c @@ -0,0 +1,672 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** @file + * + * EFI null interfaces + * + */ + +/****************************************************************************** + * + * Simple Network Protocol + * + ****************************************************************************** + */ + +static EFI_STATUS EFIAPI +efi_null_snp_start ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_stop ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_initialize ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + UINTN extra_rx_bufsize __unused, + UINTN extra_tx_bufsize __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_reset ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + BOOLEAN ext_verify __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_shutdown ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_receive_filters ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + UINT32 enable __unused, + UINT32 disable __unused, + BOOLEAN mcast_reset __unused, + UINTN mcast_count __unused, + EFI_MAC_ADDRESS *mcast __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_station_address ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + BOOLEAN reset __unused, + EFI_MAC_ADDRESS *new __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_statistics ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + BOOLEAN reset __unused, UINTN *stats_len __unused, + EFI_NETWORK_STATISTICS *stats __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_mcast_ip_to_mac ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + BOOLEAN ipv6 __unused, + EFI_IP_ADDRESS *ip __unused, + EFI_MAC_ADDRESS *mac __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_nvdata ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + BOOLEAN read __unused, UINTN offset __unused, + UINTN len __unused, VOID *data __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_get_status ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + UINT32 *interrupts __unused, VOID **txbuf __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_transmit ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + UINTN ll_header_len __unused, UINTN len __unused, + VOID *data __unused, EFI_MAC_ADDRESS *ll_src __unused, + EFI_MAC_ADDRESS *ll_dest __unused, + UINT16 *net_proto __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_snp_receive ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused, + UINTN *ll_header_len __unused, UINTN *len __unused, + VOID *data __unused, EFI_MAC_ADDRESS *ll_src __unused, + EFI_MAC_ADDRESS *ll_dest __unused, + UINT16 *net_proto __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_SIMPLE_NETWORK_PROTOCOL efi_null_snp = { + .Revision = EFI_SIMPLE_NETWORK_PROTOCOL_REVISION, + .Start = efi_null_snp_start, + .Stop = efi_null_snp_stop, + .Initialize = efi_null_snp_initialize, + .Reset = efi_null_snp_reset, + .Shutdown = efi_null_snp_shutdown, + .ReceiveFilters = efi_null_snp_receive_filters, + .StationAddress = efi_null_snp_station_address, + .Statistics = efi_null_snp_statistics, + .MCastIpToMac = efi_null_snp_mcast_ip_to_mac, + .NvData = efi_null_snp_nvdata, + .GetStatus = efi_null_snp_get_status, + 
.Transmit = efi_null_snp_transmit, + .Receive = efi_null_snp_receive, +}; + +/** + * Nullify SNP interface + * + * @v snp SNP interface + */ +void efi_nullify_snp ( EFI_SIMPLE_NETWORK_PROTOCOL *snp ) { + + memcpy ( snp, &efi_null_snp, + offsetof ( typeof ( *snp ), WaitForPacket ) ); + snp->Mode->State = EfiSimpleNetworkStopped; +} + +/****************************************************************************** + * + * Network Interface Identification protocol + * + ****************************************************************************** + */ + +static EFIAPI VOID efi_null_undi_issue ( UINT64 cdb_phys ) { + PXE_CDB *cdb = ( ( void * ) ( intptr_t ) cdb_phys ); + + cdb->StatCode = PXE_STATCODE_UNSUPPORTED; + cdb->StatFlags = PXE_STATFLAGS_COMMAND_FAILED; +} + +static PXE_SW_UNDI efi_null_undi __attribute__ (( aligned ( 16 ) )) = { + .Signature = PXE_ROMID_SIGNATURE, + .Len = sizeof ( efi_null_undi ), + .Rev = PXE_ROMID_REV, + .MajorVer = PXE_ROMID_MAJORVER, + .MinorVer = PXE_ROMID_MINORVER, + .Implementation = PXE_ROMID_IMP_SW_VIRT_ADDR, +}; + +/** + * Nullify NII interface + * + * @v nii NII interface + */ +void efi_nullify_nii ( EFI_NETWORK_INTERFACE_IDENTIFIER_PROTOCOL *nii ) { + efi_null_undi.EntryPoint = ( ( intptr_t ) efi_null_undi_issue ); + nii->Id = ( ( intptr_t ) &efi_null_undi ); +} + +/****************************************************************************** + * + * Component name protocol + * + ****************************************************************************** + */ + +static EFI_STATUS EFIAPI +efi_null_get_driver_name ( EFI_COMPONENT_NAME2_PROTOCOL *name2 __unused, + CHAR8 *language __unused, + CHAR16 **driver_name __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_get_controller_name ( EFI_COMPONENT_NAME2_PROTOCOL *name2 __unused, + EFI_HANDLE device __unused, + EFI_HANDLE child __unused, + CHAR8 *language __unused, + CHAR16 **controller_name __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_COMPONENT_NAME2_PROTOCOL efi_null_name2 = { + .GetDriverName = efi_null_get_driver_name, + .GetControllerName = efi_null_get_controller_name, + .SupportedLanguages = "", +}; + +/** + * Nullify Component Name Protocol interface + * + * @v name2 Component name protocol + */ +void efi_nullify_name2 ( EFI_COMPONENT_NAME2_PROTOCOL *name2 ) { + + memcpy ( name2, &efi_null_name2, sizeof ( *name2 ) ); +} + +/****************************************************************************** + * + * Load file protocol + * + ****************************************************************************** + */ + +static EFI_STATUS EFIAPI +efi_null_load_file ( EFI_LOAD_FILE_PROTOCOL *load_file __unused, + EFI_DEVICE_PATH_PROTOCOL *path __unused, + BOOLEAN booting __unused, UINTN *len __unused, + VOID *data __unused ) { + return EFI_UNSUPPORTED; +} + +/** + * Nullify Load File Protocol interface + * + * @v load_file Load file protocol + */ +void efi_nullify_load_file ( EFI_LOAD_FILE_PROTOCOL *load_file ) { + load_file->LoadFile = efi_null_load_file; +} + +/****************************************************************************** + * + * HII configuration access protocol + * + ****************************************************************************** + */ + +static EFI_STATUS EFIAPI +efi_null_hii_extract ( const EFI_HII_CONFIG_ACCESS_PROTOCOL *hii __unused, + EFI_STRING request __unused, + EFI_STRING *progress __unused, + EFI_STRING *results __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_hii_route ( const 
EFI_HII_CONFIG_ACCESS_PROTOCOL *hii __unused, + EFI_STRING config __unused, + EFI_STRING *progress __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_hii_callback ( const EFI_HII_CONFIG_ACCESS_PROTOCOL *hii __unused, + EFI_BROWSER_ACTION action __unused, + EFI_QUESTION_ID question_id __unused, + UINT8 type __unused, EFI_IFR_TYPE_VALUE *value __unused, + EFI_BROWSER_ACTION_REQUEST *action_request __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_HII_CONFIG_ACCESS_PROTOCOL efi_null_hii = { + .ExtractConfig = efi_null_hii_extract, + .RouteConfig = efi_null_hii_route, + .Callback = efi_null_hii_callback, +}; + +/** + * Nullify HII configuration access protocol + * + * @v hii HII configuration access protocol + */ +void efi_nullify_hii ( EFI_HII_CONFIG_ACCESS_PROTOCOL *hii ) { + + memcpy ( hii, &efi_null_hii, sizeof ( *hii ) ); +} + +/****************************************************************************** + * + * Block I/O protocol + * + ****************************************************************************** + */ + +static EFI_STATUS EFIAPI +efi_null_block_reset ( EFI_BLOCK_IO_PROTOCOL *block __unused, + BOOLEAN verify __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_block_read ( EFI_BLOCK_IO_PROTOCOL *block __unused, + UINT32 media __unused, EFI_LBA lba __unused, + UINTN len __unused, VOID *data __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_block_write ( EFI_BLOCK_IO_PROTOCOL *block __unused, + UINT32 media __unused, EFI_LBA lba __unused, + UINTN len __unused, VOID *data __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_block_flush ( EFI_BLOCK_IO_PROTOCOL *block __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_BLOCK_IO_MEDIA efi_null_block_media; + +static EFI_BLOCK_IO_PROTOCOL efi_null_block = { + .Revision = EFI_BLOCK_IO_INTERFACE_REVISION, + .Media = &efi_null_block_media, + .Reset = efi_null_block_reset, + .ReadBlocks = efi_null_block_read, + .WriteBlocks = efi_null_block_write, + .FlushBlocks = efi_null_block_flush, +}; + +/** + * Nullify block I/O protocol + * + * @v block Block I/O protocol + */ +void efi_nullify_block ( EFI_BLOCK_IO_PROTOCOL *block ) { + + memcpy ( block, &efi_null_block, sizeof ( *block ) ); +} + +/****************************************************************************** + * + * PXE base code protocol + * + ****************************************************************************** + */ + +static EFI_STATUS EFIAPI +efi_null_pxe_start ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + BOOLEAN use_ipv6 __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_stop ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_dhcp ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + BOOLEAN sort __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_discover ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + UINT16 type __unused, UINT16 *layer __unused, + BOOLEAN bis __unused, + EFI_PXE_BASE_CODE_DISCOVER_INFO *info __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_mtftp ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + EFI_PXE_BASE_CODE_TFTP_OPCODE opcode __unused, + VOID *data __unused, BOOLEAN overwrite __unused, + UINT64 *len __unused, UINTN *blksize __unused, + EFI_IP_ADDRESS *ip __unused, UINT8 *filename __unused, + EFI_PXE_BASE_CODE_MTFTP_INFO *info __unused, + BOOLEAN callback __unused 
) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_udp_write ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + UINT16 flags __unused, + EFI_IP_ADDRESS *dest_ip __unused, + EFI_PXE_BASE_CODE_UDP_PORT *dest_port __unused, + EFI_IP_ADDRESS *gateway __unused, + EFI_IP_ADDRESS *src_ip __unused, + EFI_PXE_BASE_CODE_UDP_PORT *src_port __unused, + UINTN *hdr_len __unused, VOID *hdr __unused, + UINTN *len __unused, VOID *data __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_udp_read ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + UINT16 flags __unused, + EFI_IP_ADDRESS *dest_ip __unused, + EFI_PXE_BASE_CODE_UDP_PORT *dest_port __unused, + EFI_IP_ADDRESS *src_ip __unused, + EFI_PXE_BASE_CODE_UDP_PORT *src_port __unused, + UINTN *hdr_len __unused, VOID *hdr __unused, + UINTN *len __unused, VOID *data __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_set_ip_filter ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + EFI_PXE_BASE_CODE_IP_FILTER *filter __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_arp ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + EFI_IP_ADDRESS *ip __unused, + EFI_MAC_ADDRESS *mac __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_set_parameters ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + BOOLEAN *autoarp __unused, + BOOLEAN *sendguid __unused, UINT8 *ttl __unused, + UINT8 *tos __unused, + BOOLEAN *callback __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_set_station_ip ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + EFI_IP_ADDRESS *ip __unused, + EFI_IP_ADDRESS *netmask __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_pxe_set_packets ( EFI_PXE_BASE_CODE_PROTOCOL *pxe __unused, + BOOLEAN *dhcpdisc_ok __unused, + BOOLEAN *dhcpack_ok __unused, + BOOLEAN *proxyoffer_ok __unused, + BOOLEAN *pxebsdisc_ok __unused, + BOOLEAN *pxebsack_ok __unused, + BOOLEAN *pxebsbis_ok __unused, + EFI_PXE_BASE_CODE_PACKET *dhcpdisc __unused, + EFI_PXE_BASE_CODE_PACKET *dhcpack __unused, + EFI_PXE_BASE_CODE_PACKET *proxyoffer __unused, + EFI_PXE_BASE_CODE_PACKET *pxebsdisc __unused, + EFI_PXE_BASE_CODE_PACKET *pxebsack __unused, + EFI_PXE_BASE_CODE_PACKET *pxebsbis __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_PXE_BASE_CODE_PROTOCOL efi_null_pxe = { + .Revision = EFI_PXE_BASE_CODE_PROTOCOL_REVISION, + .Start = efi_null_pxe_start, + .Stop = efi_null_pxe_stop, + .Dhcp = efi_null_pxe_dhcp, + .Discover = efi_null_pxe_discover, + .Mtftp = efi_null_pxe_mtftp, + .UdpWrite = efi_null_pxe_udp_write, + .UdpRead = efi_null_pxe_udp_read, + .SetIpFilter = efi_null_pxe_set_ip_filter, + .Arp = efi_null_pxe_arp, + .SetParameters = efi_null_pxe_set_parameters, + .SetStationIp = efi_null_pxe_set_station_ip, + .SetPackets = efi_null_pxe_set_packets, +}; + +/** + * Nullify PXE base code protocol + * + * @v pxe PXE base code protocol + */ +void efi_nullify_pxe ( EFI_PXE_BASE_CODE_PROTOCOL *pxe ) { + + memcpy ( pxe, &efi_null_pxe, offsetof ( typeof ( *pxe ), Mode ) ); + pxe->Mode->Started = FALSE; +} + +/****************************************************************************** + * + * Apple Net Boot protocol + * + ****************************************************************************** + */ + +static EFI_STATUS EFIAPI +efi_null_apple_dhcp ( EFI_APPLE_NET_BOOT_PROTOCOL *apple __unused, + UINTN *len __unused, VOID *data __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI 
+efi_null_apple_bsdp ( EFI_APPLE_NET_BOOT_PROTOCOL *apple __unused, + UINTN *len __unused, VOID *data __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_APPLE_NET_BOOT_PROTOCOL efi_null_apple = { + .GetDhcpResponse = efi_null_apple_dhcp, + .GetBsdpResponse = efi_null_apple_bsdp, +}; + +/** + * Nullify Apple Net Boot protocol + * + * @v apple Apple Net Boot protocol + */ +void efi_nullify_apple ( EFI_APPLE_NET_BOOT_PROTOCOL *apple ) { + + memcpy ( apple, &efi_null_apple, sizeof ( *apple ) ); +} + +/****************************************************************************** + * + * USB I/O Protocol + * + ****************************************************************************** + */ + +static EFI_STATUS EFIAPI +efi_null_usb_control_transfer ( EFI_USB_IO_PROTOCOL *usbio __unused, + EFI_USB_DEVICE_REQUEST *packet __unused, + EFI_USB_DATA_DIRECTION direction __unused, + UINT32 timeout __unused, VOID *data __unused, + UINTN len __unused, UINT32 *status __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_usb_bulk_transfer ( EFI_USB_IO_PROTOCOL *usbio __unused, + UINT8 endpoint __unused, VOID *data __unused, + UINTN *len __unused, UINTN timeout __unused, + UINT32 *status __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_usb_sync_interrupt_transfer ( EFI_USB_IO_PROTOCOL *usbio __unused, + UINT8 endpoint __unused, + VOID *data __unused, + UINTN *len __unused, + UINTN timeout __unused, + UINT32 *status __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_usb_async_interrupt_transfer ( EFI_USB_IO_PROTOCOL *usbio __unused, + UINT8 endpoint __unused, + BOOLEAN start __unused, + UINTN interval __unused, + UINTN len __unused, + EFI_ASYNC_USB_TRANSFER_CALLBACK + callback __unused, + VOID *context __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_usb_isochronous_transfer ( EFI_USB_IO_PROTOCOL *usbio __unused, + UINT8 endpoint __unused, + VOID *data __unused, UINTN len __unused, + UINT32 *status __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_usb_async_isochronous_transfer ( EFI_USB_IO_PROTOCOL *usbio __unused, + UINT8 endpoint __unused, + VOID *data __unused, + UINTN len __unused, + EFI_ASYNC_USB_TRANSFER_CALLBACK + callback __unused, + VOID *context __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_usb_get_device_descriptor ( EFI_USB_IO_PROTOCOL *usbio __unused, + EFI_USB_DEVICE_DESCRIPTOR + *efidesc __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_usb_get_config_descriptor ( EFI_USB_IO_PROTOCOL *usbio __unused, + EFI_USB_CONFIG_DESCRIPTOR + *efidesc __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_usb_get_interface_descriptor ( EFI_USB_IO_PROTOCOL *usbio __unused, + EFI_USB_INTERFACE_DESCRIPTOR + *efidesc __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_usb_get_endpoint_descriptor ( EFI_USB_IO_PROTOCOL *usbio __unused, + UINT8 index __unused, + EFI_USB_ENDPOINT_DESCRIPTOR + *efidesc __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_usb_get_string_descriptor ( EFI_USB_IO_PROTOCOL *usbio __unused, + UINT16 language __unused, + UINT8 index __unused, + CHAR16 **string __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_STATUS EFIAPI +efi_null_usb_get_supported_languages ( EFI_USB_IO_PROTOCOL *usbio __unused, + UINT16 **languages __unused, + UINT16 *len __unused ) { + return EFI_UNSUPPORTED; +} + 
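
These efi_nullify_*() helpers overwrite a live protocol instance's method table (and, where relevant, its mode data) with the stubs defined in this file, so that any caller still holding a pointer to the instance gets a clean EFI_UNSUPPORTED rather than a call into torn-down iPXE state. The call sites are not part of this hunk; the snippet below is an illustrative use only, and get_stale_block_io() is a hypothetical helper:

static void example_neutralise_block ( void ) {
	EFI_BLOCK_IO_PROTOCOL *block;
	EFI_STATUS status;

	/* Hypothetical: a block I/O instance we can no longer service */
	block = get_stale_block_io();

	/* Replace its method table with the null stubs */
	efi_nullify_block ( block );

	/* Any late call through a stale pointer now fails cleanly */
	status = block->ReadBlocks ( block, 0, 0, 0, NULL );
	/* status == EFI_UNSUPPORTED */
	( void ) status;
}
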
+static EFI_STATUS EFIAPI +efi_null_usb_port_reset ( EFI_USB_IO_PROTOCOL *usbio __unused ) { + return EFI_UNSUPPORTED; +} + +static EFI_USB_IO_PROTOCOL efi_null_usbio = { + .UsbControlTransfer = efi_null_usb_control_transfer, + .UsbBulkTransfer = efi_null_usb_bulk_transfer, + .UsbAsyncInterruptTransfer = efi_null_usb_async_interrupt_transfer, + .UsbSyncInterruptTransfer = efi_null_usb_sync_interrupt_transfer, + .UsbIsochronousTransfer = efi_null_usb_isochronous_transfer, + .UsbAsyncIsochronousTransfer = efi_null_usb_async_isochronous_transfer, + .UsbGetDeviceDescriptor = efi_null_usb_get_device_descriptor, + .UsbGetConfigDescriptor = efi_null_usb_get_config_descriptor, + .UsbGetInterfaceDescriptor = efi_null_usb_get_interface_descriptor, + .UsbGetEndpointDescriptor = efi_null_usb_get_endpoint_descriptor, + .UsbGetStringDescriptor = efi_null_usb_get_string_descriptor, + .UsbGetSupportedLanguages = efi_null_usb_get_supported_languages, + .UsbPortReset = efi_null_usb_port_reset, +}; + +/** + * Nullify USB I/O protocol + * + * @v usbio USB I/O protocol + */ +void efi_nullify_usbio ( EFI_USB_IO_PROTOCOL *usbio ) { + + memcpy ( usbio, &efi_null_usbio, sizeof ( *usbio ) ); +} diff --git a/src/interface/efi/efi_path.c b/src/interface/efi/efi_path.c new file mode 100644 index 000000000..bae0ac4b5 --- /dev/null +++ b/src/interface/efi/efi_path.c @@ -0,0 +1,506 @@ +/* + * Copyright (C) 2020 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * EFI device paths + * + */ + +/** + * Find end of device path + * + * @v path Path to device + * @ret path_end End of device path + */ +EFI_DEVICE_PATH_PROTOCOL * efi_path_end ( EFI_DEVICE_PATH_PROTOCOL *path ) { + + while ( path->Type != END_DEVICE_PATH_TYPE ) { + path = ( ( ( void * ) path ) + + /* There's this amazing new-fangled thing known as + * a UINT16, but who wants to use one of those? */ + ( ( path->Length[1] << 8 ) | path->Length[0] ) ); + } + + return path; +} + +/** + * Find length of device path (excluding terminator) + * + * @v path Path to device + * @ret path_len Length of device path + */ +size_t efi_path_len ( EFI_DEVICE_PATH_PROTOCOL *path ) { + EFI_DEVICE_PATH_PROTOCOL *end = efi_path_end ( path ); + + return ( ( ( void * ) end ) - ( ( void * ) path ) ); +} + +/** + * Concatenate EFI device paths + * + * @v ... List of device paths (NULL terminated) + * @ret path Concatenated device path, or NULL on error + * + * The caller is responsible for eventually calling free() on the + * allocated device path. + */ +EFI_DEVICE_PATH_PROTOCOL * efi_paths ( EFI_DEVICE_PATH_PROTOCOL *first, ... 
) { + EFI_DEVICE_PATH_PROTOCOL *path; + EFI_DEVICE_PATH_PROTOCOL *src; + EFI_DEVICE_PATH_PROTOCOL *dst; + EFI_DEVICE_PATH_PROTOCOL *end; + va_list args; + size_t len; + + /* Calculate device path length */ + va_start ( args, first ); + len = 0; + src = first; + while ( src ) { + len += efi_path_len ( src ); + src = va_arg ( args, EFI_DEVICE_PATH_PROTOCOL * ); + } + va_end ( args ); + + /* Allocate device path */ + path = zalloc ( len + sizeof ( *end ) ); + if ( ! path ) + return NULL; + + /* Populate device path */ + va_start ( args, first ); + dst = path; + src = first; + while ( src ) { + len = efi_path_len ( src ); + memcpy ( dst, src, len ); + dst = ( ( ( void * ) dst ) + len ); + src = va_arg ( args, EFI_DEVICE_PATH_PROTOCOL * ); + } + va_end ( args ); + end = dst; + end->Type = END_DEVICE_PATH_TYPE; + end->SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; + end->Length[0] = sizeof ( *end ); + + return path; +} + +/** + * Construct EFI device path for network device + * + * @v netdev Network device + * @ret path EFI device path, or NULL on error + * + * The caller is responsible for eventually calling free() on the + * allocated device path. + */ +EFI_DEVICE_PATH_PROTOCOL * efi_netdev_path ( struct net_device *netdev ) { + struct efi_device *efidev; + EFI_DEVICE_PATH_PROTOCOL *path; + MAC_ADDR_DEVICE_PATH *macpath; + VLAN_DEVICE_PATH *vlanpath; + EFI_DEVICE_PATH_PROTOCOL *end; + unsigned int tag; + size_t prefix_len; + size_t len; + + /* Find parent EFI device */ + efidev = efidev_parent ( netdev->dev ); + if ( ! efidev ) + return NULL; + + /* Calculate device path length */ + prefix_len = efi_path_len ( efidev->path ); + len = ( prefix_len + sizeof ( *macpath ) + sizeof ( *vlanpath ) + + sizeof ( *end ) ); + + /* Allocate device path */ + path = zalloc ( len ); + if ( ! path ) + return NULL; + + /* Construct device path */ + memcpy ( path, efidev->path, prefix_len ); + macpath = ( ( ( void * ) path ) + prefix_len ); + macpath->Header.Type = MESSAGING_DEVICE_PATH; + macpath->Header.SubType = MSG_MAC_ADDR_DP; + macpath->Header.Length[0] = sizeof ( *macpath ); + assert ( netdev->ll_protocol->ll_addr_len < + sizeof ( macpath->MacAddress ) ); + memcpy ( &macpath->MacAddress, netdev->ll_addr, + netdev->ll_protocol->ll_addr_len ); + macpath->IfType = ntohs ( netdev->ll_protocol->ll_proto ); + if ( ( tag = vlan_tag ( netdev ) ) ) { + vlanpath = ( ( ( void * ) macpath ) + sizeof ( *macpath ) ); + vlanpath->Header.Type = MESSAGING_DEVICE_PATH; + vlanpath->Header.SubType = MSG_VLAN_DP; + vlanpath->Header.Length[0] = sizeof ( *vlanpath ); + vlanpath->VlanId = tag; + end = ( ( ( void * ) vlanpath ) + sizeof ( *vlanpath ) ); + } else { + end = ( ( ( void * ) macpath ) + sizeof ( *macpath ) ); + } + end->Type = END_DEVICE_PATH_TYPE; + end->SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; + end->Length[0] = sizeof ( *end ); + + return path; +} + +/** + * Construct EFI device path for URI + * + * @v uri URI + * @ret path EFI device path, or NULL on error + * + * The caller is responsible for eventually calling free() on the + * allocated device path. + */ +EFI_DEVICE_PATH_PROTOCOL * efi_uri_path ( struct uri *uri ) { + EFI_DEVICE_PATH_PROTOCOL *path; + EFI_DEVICE_PATH_PROTOCOL *end; + URI_DEVICE_PATH *uripath; + size_t uri_len; + size_t uripath_len; + size_t len; + + /* Calculate device path length */ + uri_len = ( format_uri ( uri, NULL, 0 ) + 1 /* NUL */ ); + uripath_len = ( sizeof ( *uripath ) + uri_len ); + len = ( uripath_len + sizeof ( *end ) ); + + /* Allocate device path */ + path = zalloc ( len ); + if ( ! 
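
efi_paths() above takes a NULL-terminated list of end-terminated device paths, copies each one up to (but excluding) its end node, and appends a single fresh end node; the allocation is sized as the sum of efi_path_len() over the inputs plus one terminator, and the caller must eventually free() the result. A minimal usage sketch (the wrapper and its argument names are illustrative; efi_aoe_path() later in this patch shows the same pattern with a real SATA suffix):

static EFI_DEVICE_PATH_PROTOCOL *
example_append ( EFI_DEVICE_PATH_PROTOCOL *parent,
		 EFI_DEVICE_PATH_PROTOCOL *suffix ) {
	EFI_DEVICE_PATH_PROTOCOL *path;

	/* Both inputs must themselves be end-terminated device paths */
	path = efi_paths ( parent, suffix, NULL );
	if ( ! path )
		return NULL;	/* allocation failure */

	/* The concatenation contains exactly one end node; the caller
	 * owns it and must free() it when finished.
	 */
	return path;
}
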
path ) + return NULL; + + /* Construct device path */ + uripath = ( ( void * ) path ); + uripath->Header.Type = MESSAGING_DEVICE_PATH; + uripath->Header.SubType = MSG_URI_DP; + uripath->Header.Length[0] = ( uripath_len & 0xff ); + uripath->Header.Length[1] = ( uripath_len >> 8 ); + format_uri ( uri, uripath->Uri, uri_len ); + end = ( ( ( void * ) path ) + uripath_len ); + end->Type = END_DEVICE_PATH_TYPE; + end->SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; + end->Length[0] = sizeof ( *end ); + + return path; +} + +/** + * Construct EFI device path for iSCSI device + * + * @v iscsi iSCSI session + * @ret path EFI device path, or NULL on error + */ +EFI_DEVICE_PATH_PROTOCOL * efi_iscsi_path ( struct iscsi_session *iscsi ) { + struct sockaddr_tcpip *st_target; + struct net_device *netdev; + EFI_DEVICE_PATH_PROTOCOL *netpath; + EFI_DEVICE_PATH_PROTOCOL *path; + EFI_DEVICE_PATH_PROTOCOL *end; + ISCSI_DEVICE_PATH *iscsipath; + char *name; + size_t prefix_len; + size_t name_len; + size_t iscsi_len; + size_t len; + + /* Get network device associated with target address */ + st_target = ( ( struct sockaddr_tcpip * ) &iscsi->target_sockaddr ); + netdev = tcpip_netdev ( st_target ); + if ( ! netdev ) + goto err_netdev; + + /* Get network device path */ + netpath = efi_netdev_path ( netdev ); + if ( ! netpath ) + goto err_netpath; + + /* Calculate device path length */ + prefix_len = efi_path_len ( netpath ); + name_len = ( strlen ( iscsi->target_iqn ) + 1 /* NUL */ ); + iscsi_len = ( sizeof ( *iscsipath ) + name_len ); + len = ( prefix_len + iscsi_len + sizeof ( *end ) ); + + /* Allocate device path */ + path = zalloc ( len ); + if ( ! path ) + goto err_alloc; + + /* Construct device path */ + memcpy ( path, netpath, prefix_len ); + iscsipath = ( ( ( void * ) path ) + prefix_len ); + iscsipath->Header.Type = MESSAGING_DEVICE_PATH; + iscsipath->Header.SubType = MSG_ISCSI_DP; + iscsipath->Header.Length[0] = iscsi_len; + iscsipath->LoginOption = ISCSI_LOGIN_OPTION_AUTHMETHOD_NON; + memcpy ( &iscsipath->Lun, &iscsi->lun, sizeof ( iscsipath->Lun ) ); + name = ( ( ( void * ) iscsipath ) + sizeof ( *iscsipath ) ); + memcpy ( name, iscsi->target_iqn, name_len ); + end = ( ( ( void * ) name ) + name_len ); + end->Type = END_DEVICE_PATH_TYPE; + end->SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; + end->Length[0] = sizeof ( *end ); + + /* Free temporary paths */ + free ( netpath ); + + return path; + + err_alloc: + free ( netpath ); + err_netpath: + err_netdev: + return NULL; +} + +/** + * Construct EFI device path for AoE device + * + * @v aoedev AoE device + * @ret path EFI device path, or NULL on error + */ +EFI_DEVICE_PATH_PROTOCOL * efi_aoe_path ( struct aoe_device *aoedev ) { + struct { + SATA_DEVICE_PATH sata; + EFI_DEVICE_PATH_PROTOCOL end; + } satapath; + EFI_DEVICE_PATH_PROTOCOL *netpath; + EFI_DEVICE_PATH_PROTOCOL *path; + + /* Get network device path */ + netpath = efi_netdev_path ( aoedev->netdev ); + if ( ! 
netpath ) + goto err_netdev; + + /* Construct SATA path */ + memset ( &satapath, 0, sizeof ( satapath ) ); + satapath.sata.Header.Type = MESSAGING_DEVICE_PATH; + satapath.sata.Header.SubType = MSG_SATA_DP; + satapath.sata.Header.Length[0] = sizeof ( satapath.sata ); + satapath.sata.HBAPortNumber = aoedev->major; + satapath.sata.PortMultiplierPortNumber = aoedev->minor; + satapath.end.Type = END_DEVICE_PATH_TYPE; + satapath.end.SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; + satapath.end.Length[0] = sizeof ( satapath.end ); + + /* Construct overall device path */ + path = efi_paths ( netpath, &satapath, NULL ); + if ( ! path ) + goto err_paths; + + /* Free temporary paths */ + free ( netpath ); + + return path; + + err_paths: + free ( netpath ); + err_netdev: + return NULL; +} + +/** + * Construct EFI device path for Fibre Channel device + * + * @v desc FCP device description + * @ret path EFI device path, or NULL on error + */ +EFI_DEVICE_PATH_PROTOCOL * efi_fcp_path ( struct fcp_description *desc ) { + struct { + FIBRECHANNELEX_DEVICE_PATH fc; + EFI_DEVICE_PATH_PROTOCOL end; + } __attribute__ (( packed )) *path; + + /* Allocate device path */ + path = zalloc ( sizeof ( *path ) ); + if ( ! path ) + return NULL; + + /* Construct device path */ + path->fc.Header.Type = MESSAGING_DEVICE_PATH; + path->fc.Header.SubType = MSG_FIBRECHANNELEX_DP; + path->fc.Header.Length[0] = sizeof ( path->fc ); + memcpy ( path->fc.WWN, &desc->wwn, sizeof ( path->fc.WWN ) ); + memcpy ( path->fc.Lun, &desc->lun, sizeof ( path->fc.Lun ) ); + path->end.Type = END_DEVICE_PATH_TYPE; + path->end.SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; + path->end.Length[0] = sizeof ( path->end ); + + return &path->fc.Header; +} + +/** + * Construct EFI device path for Infiniband SRP device + * + * @v ib_srp Infiniband SRP device + * @ret path EFI device path, or NULL on error + */ +EFI_DEVICE_PATH_PROTOCOL * efi_ib_srp_path ( struct ib_srp_device *ib_srp ) { + const struct ipxe_ib_sbft *sbft = &ib_srp->sbft; + union ib_srp_target_port_id *id = + container_of ( &sbft->srp.target, union ib_srp_target_port_id, + srp ); + struct efi_device *efidev; + EFI_DEVICE_PATH_PROTOCOL *path; + INFINIBAND_DEVICE_PATH *ibpath; + EFI_DEVICE_PATH_PROTOCOL *end; + size_t prefix_len; + size_t len; + + /* Find parent EFI device */ + efidev = efidev_parent ( ib_srp->ibdev->dev ); + if ( ! efidev ) + return NULL; + + /* Calculate device path length */ + prefix_len = efi_path_len ( efidev->path ); + len = ( prefix_len + sizeof ( *ibpath ) + sizeof ( *end ) ); + + /* Allocate device path */ + path = zalloc ( len ); + if ( ! 
path ) + return NULL; + + /* Construct device path */ + memcpy ( path, efidev->path, prefix_len ); + ibpath = ( ( ( void * ) path ) + prefix_len ); + ibpath->Header.Type = MESSAGING_DEVICE_PATH; + ibpath->Header.SubType = MSG_INFINIBAND_DP; + ibpath->Header.Length[0] = sizeof ( *ibpath ); + ibpath->ResourceFlags = INFINIBAND_RESOURCE_FLAG_STORAGE_PROTOCOL; + memcpy ( ibpath->PortGid, &sbft->ib.dgid, sizeof ( ibpath->PortGid ) ); + memcpy ( &ibpath->ServiceId, &sbft->ib.service_id, + sizeof ( ibpath->ServiceId ) ); + memcpy ( &ibpath->TargetPortId, &id->ib.ioc_guid, + sizeof ( ibpath->TargetPortId ) ); + memcpy ( &ibpath->DeviceId, &id->ib.id_ext, + sizeof ( ibpath->DeviceId ) ); + end = ( ( ( void * ) ibpath ) + sizeof ( *ibpath ) ); + end->Type = END_DEVICE_PATH_TYPE; + end->SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; + end->Length[0] = sizeof ( *end ); + + return path; +} + +/** + * Construct EFI device path for USB function + * + * @v func USB function + * @ret path EFI device path, or NULL on error + * + * The caller is responsible for eventually calling free() on the + * allocated device path. + */ +EFI_DEVICE_PATH_PROTOCOL * efi_usb_path ( struct usb_function *func ) { + struct usb_device *usb = func->usb; + struct efi_device *efidev; + EFI_DEVICE_PATH_PROTOCOL *path; + EFI_DEVICE_PATH_PROTOCOL *end; + USB_DEVICE_PATH *usbpath; + unsigned int count; + size_t prefix_len; + size_t len; + + /* Sanity check */ + assert ( func->desc.count >= 1 ); + + /* Find parent EFI device */ + efidev = efidev_parent ( &func->dev ); + if ( ! efidev ) + return NULL; + + /* Calculate device path length */ + count = ( usb_depth ( usb ) + 1 ); + prefix_len = efi_path_len ( efidev->path ); + len = ( prefix_len + ( count * sizeof ( *usbpath ) ) + + sizeof ( *end ) ); + + /* Allocate device path */ + path = zalloc ( len ); + if ( ! path ) + return NULL; + + /* Construct device path */ + memcpy ( path, efidev->path, prefix_len ); + end = ( ( ( void * ) path ) + len - sizeof ( *end ) ); + end->Type = END_DEVICE_PATH_TYPE; + end->SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; + end->Length[0] = sizeof ( *end ); + usbpath = ( ( ( void * ) end ) - sizeof ( *usbpath ) ); + usbpath->InterfaceNumber = func->interface[0]; + for ( ; usb ; usbpath--, usb = usb->port->hub->usb ) { + usbpath->Header.Type = MESSAGING_DEVICE_PATH; + usbpath->Header.SubType = MSG_USB_DP; + usbpath->Header.Length[0] = sizeof ( *usbpath ); + usbpath->ParentPortNumber = ( usb->port->address - 1 ); + } + + return path; +} + +/** + * Describe object as an EFI device path + * + * @v intf Interface + * @ret path EFI device path, or NULL + * + * The caller is responsible for eventually calling free() on the + * allocated device path. 
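/* Illustrative sketch (not part of this patch): every constructor above
 * follows the same pattern of copying a prefix, appending one or more
 * protocol-specific nodes, and terminating with an end node.  The
 * stand-in types below are simplified, assumed equivalents of the EDK2
 * headers, shown only to make the two-byte little-endian Length
 * encoding and the end-of-path walk explicit.
 */
#include <stdint.h>
#include <stddef.h>

#define END_PATH_TYPE		0x7f
#define END_ENTIRE_SUBTYPE	0xff

struct node_header {
	uint8_t type;
	uint8_t subtype;
	uint8_t length[2];	/* node length in bytes, little-endian */
};

/* Encode a node's total length (header included) into Length[0..1] */
static void node_set_len ( struct node_header *hdr, size_t len ) {
	hdr->length[0] = ( len & 0xff );
	hdr->length[1] = ( len >> 8 );
}

/* Walk a path and return its total length including the end node */
static size_t path_total_len ( struct node_header *node ) {
	size_t total = 0;
	size_t len;

	while ( 1 ) {
		len = ( node->length[0] | ( node->length[1] << 8 ) );
		total += len;
		if ( ( node->type == END_PATH_TYPE ) &&
		     ( node->subtype == END_ENTIRE_SUBTYPE ) )
			return total;
		node = ( ( ( void * ) node ) + len );
	}
}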
+ */ +EFI_DEVICE_PATH_PROTOCOL * efi_describe ( struct interface *intf ) { + struct interface *dest; + efi_describe_TYPE ( void * ) *op = + intf_get_dest_op ( intf, efi_describe, &dest ); + void *object = intf_object ( dest ); + EFI_DEVICE_PATH_PROTOCOL *path; + + if ( op ) { + path = op ( object ); + } else { + path = NULL; + } + + intf_put ( dest ); + return path; +} diff --git a/src/interface/efi/efi_pci.c b/src/interface/efi/efi_pci.c index c1f451c99..fda4aba0e 100644 --- a/src/interface/efi/efi_pci.c +++ b/src/interface/efi/efi_pci.c @@ -26,6 +26,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include #include #include #include @@ -62,15 +63,79 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); */ /** - * Locate EFI PCI root bridge I/O protocol + * Check for a matching PCI root bridge I/O protocol + * + * @v pci PCI device + * @v handle EFI PCI root bridge handle + * @v root EFI PCI root bridge I/O protocol + * @ret rc Return status code + */ +static int efipci_root_match ( struct pci_device *pci, EFI_HANDLE handle, + EFI_PCI_ROOT_BRIDGE_IO_PROTOCOL *root ) { + union { + union acpi_resource *res; + void *raw; + } u; + unsigned int segment = PCI_SEG ( pci->busdevfn ); + unsigned int bus = PCI_BUS ( pci->busdevfn ); + unsigned int start; + unsigned int end; + unsigned int tag; + EFI_STATUS efirc; + int rc; + + /* Check segment number */ + if ( root->SegmentNumber != segment ) + return -ENOENT; + + /* Get ACPI resource descriptors */ + if ( ( efirc = root->Configuration ( root, &u.raw ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( pci, "EFIPCI " PCI_FMT " cannot get configuration for " + "%s: %s\n", PCI_ARGS ( pci ), + efi_handle_name ( handle ), strerror ( rc ) ); + return rc; + } + + /* Assume success if no bus number range descriptors are found */ + rc = 0; + + /* Parse resource descriptors */ + for ( ; ( ( tag = acpi_resource_tag ( u.res ) ) != ACPI_END_RESOURCE ) ; + u.res = acpi_resource_next ( u.res ) ) { + + /* Ignore anything other than a bus number range descriptor */ + if ( tag != ACPI_QWORD_ADDRESS_SPACE_RESOURCE ) + continue; + if ( u.res->qword.type != ACPI_ADDRESS_TYPE_BUS ) + continue; + + /* Check for a matching bus number */ + start = le64_to_cpu ( u.res->qword.min ); + end = ( start + le64_to_cpu ( u.res->qword.len ) ); + if ( ( bus >= start ) && ( bus < end ) ) + return 0; + + /* We have seen at least one non-matching range + * descriptor, so assume failure unless we find a + * subsequent match. 
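/* Illustrative sketch (not part of this patch): the bus-range matching
 * policy above, restated over a hypothetical flat array of ranges
 * instead of ACPI resource descriptors.  No ranges at all counts as a
 * match; otherwise at least one half-open range [min, min + len) must
 * contain the bus number.
 */
#include <stdint.h>

struct bus_range {
	uint64_t min;		/* first bus number covered */
	uint64_t len;		/* number of buses covered */
};

static int match_bus ( unsigned int bus, const struct bus_range *range,
		       unsigned int count ) {
	unsigned int i;

	if ( count == 0 )
		return 0;	/* assume success: no descriptors found */
	for ( i = 0 ; i < count ; i++ ) {
		if ( ( bus >= range[i].min ) &&
		     ( bus < ( range[i].min + range[i].len ) ) )
			return 0;
	}
	return -1;		/* corresponds to -ENOENT in the patch */
}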
+ */ + rc = -ENOENT; + } + + return rc; +} + +/** + * Open EFI PCI root bridge I/O protocol * * @v pci PCI device * @ret handle EFI PCI root bridge handle * @ret root EFI PCI root bridge I/O protocol, or NULL if not found * @ret rc Return status code */ -static int efipci_root ( struct pci_device *pci, EFI_HANDLE *handle, - EFI_PCI_ROOT_BRIDGE_IO_PROTOCOL **root ) { +static int efipci_root_open ( struct pci_device *pci, EFI_HANDLE *handle, + EFI_PCI_ROOT_BRIDGE_IO_PROTOCOL **root ) { EFI_BOOT_SERVICES *bs = efi_systab->BootServices; EFI_HANDLE *handles; UINTN num_handles; @@ -105,7 +170,7 @@ static int efipci_root ( struct pci_device *pci, EFI_HANDLE *handle, strerror ( rc ) ); continue; } - if ( u.root->SegmentNumber == PCI_SEG ( pci->busdevfn ) ) { + if ( efipci_root_match ( pci, *handle, u.root ) == 0 ) { *root = u.root; bs->FreePool ( handles ); return 0; @@ -123,6 +188,19 @@ static int efipci_root ( struct pci_device *pci, EFI_HANDLE *handle, return rc; } +/** + * Close EFI PCI root bridge I/O protocol + * + * @v handle EFI PCI root bridge handle + */ +static void efipci_root_close ( EFI_HANDLE handle ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + + /* Close protocol */ + bs->CloseProtocol ( handle, &efi_pci_root_bridge_io_protocol_guid, + efi_image_handle, handle ); +} + /** * Calculate EFI PCI configuration space address * @@ -149,14 +227,13 @@ static unsigned long efipci_address ( struct pci_device *pci, */ int efipci_read ( struct pci_device *pci, unsigned long location, void *value ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; EFI_PCI_ROOT_BRIDGE_IO_PROTOCOL *root; EFI_HANDLE handle; EFI_STATUS efirc; int rc; - /* Identify root bridge */ - if ( ( rc = efipci_root ( pci, &handle, &root ) ) != 0 ) + /* Open root bridge */ + if ( ( rc = efipci_root_open ( pci, &handle, &root ) ) != 0 ) goto err_root; /* Read from configuration space */ @@ -171,8 +248,7 @@ int efipci_read ( struct pci_device *pci, unsigned long location, } err_read: - bs->CloseProtocol ( handle, &efi_pci_root_bridge_io_protocol_guid, - efi_image_handle, handle ); + efipci_root_close ( handle ); err_root: return rc; } @@ -187,14 +263,13 @@ int efipci_read ( struct pci_device *pci, unsigned long location, */ int efipci_write ( struct pci_device *pci, unsigned long location, unsigned long value ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; EFI_PCI_ROOT_BRIDGE_IO_PROTOCOL *root; EFI_HANDLE handle; EFI_STATUS efirc; int rc; - /* Identify root bridge */ - if ( ( rc = efipci_root ( pci, &handle, &root ) ) != 0 ) + /* Open root bridge */ + if ( ( rc = efipci_root_open ( pci, &handle, &root ) ) != 0 ) goto err_root; /* Read from configuration space */ @@ -209,12 +284,84 @@ int efipci_write ( struct pci_device *pci, unsigned long location, } err_write: - bs->CloseProtocol ( handle, &efi_pci_root_bridge_io_protocol_guid, - efi_image_handle, handle ); + efipci_root_close ( handle ); err_root: return rc; } +/** + * Map PCI bus address as an I/O address + * + * @v bus_addr PCI bus address + * @v len Length of region + * @ret io_addr I/O address, or NULL on error + */ +void * efipci_ioremap ( struct pci_device *pci, unsigned long bus_addr, + size_t len ) { + union { + union acpi_resource *res; + void *raw; + } u; + EFI_PCI_ROOT_BRIDGE_IO_PROTOCOL *root; + EFI_HANDLE handle; + unsigned int tag; + uint64_t offset; + uint64_t start; + uint64_t end; + EFI_STATUS efirc; + int rc; + + /* Open root bridge */ + if ( ( rc = efipci_root_open ( pci, &handle, &root ) ) != 0 ) + goto err_root; + + /* Get ACPI 
resource descriptors */ + if ( ( efirc = root->Configuration ( root, &u.raw ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( pci, "EFIPCI " PCI_FMT " cannot get configuration: " + "%s\n", PCI_ARGS ( pci ), strerror ( rc ) ); + goto err_config; + } + + /* Parse resource descriptors */ + for ( ; ( ( tag = acpi_resource_tag ( u.res ) ) != ACPI_END_RESOURCE ) ; + u.res = acpi_resource_next ( u.res ) ) { + + /* Ignore anything other than a memory range descriptor */ + if ( tag != ACPI_QWORD_ADDRESS_SPACE_RESOURCE ) + continue; + if ( u.res->qword.type != ACPI_ADDRESS_TYPE_MEM ) + continue; + + /* Ignore descriptors that do not cover this memory range */ + offset = le64_to_cpu ( u.res->qword.offset ); + start = ( offset + le64_to_cpu ( u.res->qword.min ) ); + end = ( start + le64_to_cpu ( u.res->qword.len ) ); + DBGC2 ( pci, "EFIPCI " PCI_FMT " found range [%08llx,%08llx) " + "-> [%08llx,%08llx)\n", PCI_ARGS ( pci ), start, end, + ( start - offset ), ( end - offset ) ); + if ( ( bus_addr < start ) || ( ( bus_addr + len ) > end ) ) + continue; + + /* Use this address space descriptor */ + DBGC2 ( pci, "EFIPCI " PCI_FMT " %08lx+%zx -> ", + PCI_ARGS ( pci ), bus_addr, len ); + bus_addr -= offset; + DBGC2 ( pci, "%08lx\n", bus_addr ); + break; + } + if ( tag == ACPI_END_RESOURCE ) { + DBGC ( pci, "EFIPCI " PCI_FMT " %08lx+%zx is not within " + "root bridge address space\n", + PCI_ARGS ( pci ), bus_addr, len ); + } + + err_config: + efipci_root_close ( handle ); + err_root: + return ioremap ( bus_addr, len ); +} + PROVIDE_PCIAPI_INLINE ( efi, pci_num_bus ); PROVIDE_PCIAPI_INLINE ( efi, pci_read_config_byte ); PROVIDE_PCIAPI_INLINE ( efi, pci_read_config_word ); @@ -222,6 +369,283 @@ PROVIDE_PCIAPI_INLINE ( efi, pci_read_config_dword ); PROVIDE_PCIAPI_INLINE ( efi, pci_write_config_byte ); PROVIDE_PCIAPI_INLINE ( efi, pci_write_config_word ); PROVIDE_PCIAPI_INLINE ( efi, pci_write_config_dword ); +PROVIDE_PCIAPI ( efi, pci_ioremap, efipci_ioremap ); + +/****************************************************************************** + * + * EFI PCI DMA mappings + * + ****************************************************************************** + */ + +/** + * Map buffer for DMA + * + * @v dma DMA device + * @v map DMA mapping to fill in + * @v addr Buffer address + * @v len Length of buffer + * @v flags Mapping flags + * @ret rc Return status code + */ +static int efipci_dma_map ( struct dma_device *dma, struct dma_mapping *map, + physaddr_t addr, size_t len, int flags ) { + struct efi_pci_device *efipci = + container_of ( dma, struct efi_pci_device, pci.dma ); + struct pci_device *pci = &efipci->pci; + EFI_PCI_IO_PROTOCOL *pci_io = efipci->io; + EFI_PCI_IO_PROTOCOL_OPERATION op; + EFI_PHYSICAL_ADDRESS bus; + UINTN count; + VOID *mapping; + EFI_STATUS efirc; + int rc; + + /* Sanity check */ + assert ( map->dma == NULL ); + assert ( map->offset == 0 ); + assert ( map->token == NULL ); + + /* Determine operation */ + switch ( flags ) { + case DMA_TX: + op = EfiPciIoOperationBusMasterRead; + break; + case DMA_RX: + op = EfiPciIoOperationBusMasterWrite; + break; + default: + op = EfiPciIoOperationBusMasterCommonBuffer; + break; + } + + /* Map buffer (if non-zero length) */ + count = len; + if ( len ) { + if ( ( efirc = pci_io->Map ( pci_io, op, phys_to_virt ( addr ), + &count, &bus, &mapping ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( pci, "EFIPCI " PCI_FMT " cannot map %08lx+%zx: " + "%s\n", PCI_ARGS ( pci ), addr, len, + strerror ( rc ) ); + goto err_map; + } + } else { + bus = addr; + mapping = NULL; + } 
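/* Illustrative sketch (not part of this patch): the address translation
 * performed by efipci_ioremap() above, restated over a hypothetical
 * flattened window description.  The descriptor's offset is the
 * difference between bus-visible and CPU-visible addresses, so a bus
 * address that lies within the window maps to ( bus_addr - offset ).
 */
#include <stdint.h>
#include <stddef.h>

struct bridge_window {
	uint64_t start;		/* first bus address covered */
	uint64_t len;		/* length of window */
	uint64_t offset;	/* ( bus address - CPU physical address ) */
};

/* Translate a bus address, or return 0 if the region does not lie
 * entirely within the window.
 */
static uint64_t window_to_phys ( const struct bridge_window *win,
				 uint64_t bus_addr, size_t len ) {
	uint64_t end = ( win->start + win->len );

	if ( ( bus_addr < win->start ) || ( ( bus_addr + len ) > end ) )
		return 0;
	return ( bus_addr - win->offset );
}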
+ + /* Check that full length was mapped. The UEFI specification + * allows for multiple mappings to be required, but even the + * EDK2 PCI device drivers will fail if a platform ever + * requires this. + */ + if ( count != len ) { + DBGC ( pci, "EFIPCI " PCI_FMT " attempted split mapping for " + "%08lx+%zx\n", PCI_ARGS ( pci ), addr, len ); + rc = -ENOTSUP; + goto err_len; + } + + /* Populate mapping */ + map->dma = dma; + map->offset = ( bus - addr ); + map->token = mapping; + + /* Increment mapping count (for debugging) */ + if ( DBG_LOG ) + dma->mapped++; + + return 0; + + err_len: + pci_io->Unmap ( pci_io, mapping ); + err_map: + return rc; +} + +/** + * Unmap buffer + * + * @v dma DMA device + * @v map DMA mapping + */ +static void efipci_dma_unmap ( struct dma_device *dma, + struct dma_mapping *map ) { + struct efi_pci_device *efipci = + container_of ( dma, struct efi_pci_device, pci.dma ); + EFI_PCI_IO_PROTOCOL *pci_io = efipci->io; + + /* Unmap buffer (if non-zero length) */ + if ( map->token ) + pci_io->Unmap ( pci_io, map->token ); + + /* Clear mapping */ + map->dma = NULL; + map->offset = 0; + map->token = NULL; + + /* Decrement mapping count (for debugging) */ + if ( DBG_LOG ) + dma->mapped--; +} + +/** + * Allocate and map DMA-coherent buffer + * + * @v dma DMA device + * @v map DMA mapping to fill in + * @v len Length of buffer + * @v align Physical alignment + * @ret addr Buffer address, or NULL on error + */ +static void * efipci_dma_alloc ( struct dma_device *dma, + struct dma_mapping *map, + size_t len, size_t align __unused ) { + struct efi_pci_device *efipci = + container_of ( dma, struct efi_pci_device, pci.dma ); + struct pci_device *pci = &efipci->pci; + EFI_PCI_IO_PROTOCOL *pci_io = efipci->io; + unsigned int pages; + VOID *addr; + EFI_STATUS efirc; + int rc; + + /* Calculate number of pages */ + pages = ( ( len + EFI_PAGE_SIZE - 1 ) / EFI_PAGE_SIZE ); + + /* Allocate (page-aligned) buffer */ + if ( ( efirc = pci_io->AllocateBuffer ( pci_io, AllocateAnyPages, + EfiBootServicesData, pages, + &addr, 0 ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( pci, "EFIPCI " PCI_FMT " could not allocate %zd bytes: " + "%s\n", PCI_ARGS ( pci ), len, strerror ( rc ) ); + goto err_alloc; + } + + /* Map buffer */ + if ( ( rc = efipci_dma_map ( dma, map, virt_to_phys ( addr ), + ( pages * EFI_PAGE_SIZE ), + DMA_BI ) ) != 0 ) + goto err_map; + + /* Increment allocation count (for debugging) */ + if ( DBG_LOG ) + dma->allocated++; + + return addr; + + efipci_dma_unmap ( dma, map ); + err_map: + pci_io->FreeBuffer ( pci_io, pages, addr ); + err_alloc: + return NULL; +} + +/** + * Unmap and free DMA-coherent buffer + * + * @v dma DMA device + * @v map DMA mapping + * @v addr Buffer address + * @v len Length of buffer + */ +static void efipci_dma_free ( struct dma_device *dma, struct dma_mapping *map, + void *addr, size_t len ) { + struct efi_pci_device *efipci = + container_of ( dma, struct efi_pci_device, pci.dma ); + EFI_PCI_IO_PROTOCOL *pci_io = efipci->io; + unsigned int pages; + + /* Calculate number of pages */ + pages = ( ( len + EFI_PAGE_SIZE - 1 ) / EFI_PAGE_SIZE ); + + /* Unmap buffer */ + efipci_dma_unmap ( dma, map ); + + /* Free buffer */ + pci_io->FreeBuffer ( pci_io, pages, addr ); + + /* Decrement allocation count (for debugging) */ + if ( DBG_LOG ) + dma->allocated--; +} + +/** + * Allocate and map DMA-coherent buffer from external (user) memory + * + * @v dma DMA device + * @v map DMA mapping to fill in + * @v len Length of buffer + * @v align Physical alignment + * @ret 
addr Buffer address, or NULL on error + */ +static userptr_t efipci_dma_umalloc ( struct dma_device *dma, + struct dma_mapping *map, + size_t len, size_t align ) { + void *addr; + + addr = efipci_dma_alloc ( dma, map, len, align ); + return virt_to_user ( addr ); +} + +/** + * Unmap and free DMA-coherent buffer from external (user) memory + * + * @v dma DMA device + * @v map DMA mapping + * @v addr Buffer address + * @v len Length of buffer + */ +static void efipci_dma_ufree ( struct dma_device *dma, struct dma_mapping *map, + userptr_t addr, size_t len ) { + + efipci_dma_free ( dma, map, user_to_virt ( addr, 0 ), len ); +} + +/** + * Set addressable space mask + * + * @v dma DMA device + * @v mask Addressable space mask + */ +static void efipci_dma_set_mask ( struct dma_device *dma, physaddr_t mask ) { + struct efi_pci_device *efipci = + container_of ( dma, struct efi_pci_device, pci.dma ); + struct pci_device *pci = &efipci->pci; + EFI_PCI_IO_PROTOCOL *pci_io = efipci->io; + EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION op; + UINT64 attrs; + int is64; + EFI_STATUS efirc; + int rc; + + /* Set dual address cycle attribute for 64-bit capable devices */ + is64 = ( ( ( ( uint64_t ) mask ) + 1 ) == 0 ); + op = ( is64 ? EfiPciIoAttributeOperationEnable : + EfiPciIoAttributeOperationDisable ); + attrs = EFI_PCI_IO_ATTRIBUTE_DUAL_ADDRESS_CYCLE; + if ( ( efirc = pci_io->Attributes ( pci_io, op, attrs, NULL ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( pci, "EFIPCI " PCI_FMT " could not %sable DAC: %s\n", + PCI_ARGS ( pci ), ( is64 ? "en" : "dis" ), + strerror ( rc ) ); + /* Ignore failure: errors will manifest in mapping attempts */ + return; + } +} + +/** EFI PCI DMA operations */ +static struct dma_operations efipci_dma_operations = { + .map = efipci_dma_map, + .unmap = efipci_dma_unmap, + .alloc = efipci_dma_alloc, + .free = efipci_dma_free, + .umalloc = efipci_dma_umalloc, + .ufree = efipci_dma_ufree, + .set_mask = efipci_dma_set_mask, +}; /****************************************************************************** * @@ -235,11 +659,11 @@ PROVIDE_PCIAPI_INLINE ( efi, pci_write_config_dword ); * * @v device EFI device handle * @v attributes Protocol opening attributes - * @v pci PCI device to fill in + * @v efipci EFI PCI device to fill in * @ret rc Return status code */ int efipci_open ( EFI_HANDLE device, UINT32 attributes, - struct pci_device *pci ) { + struct efi_pci_device *efipci ) { EFI_BOOT_SERVICES *bs = efi_systab->BootServices; union { EFI_PCI_IO_PROTOCOL *pci_io; @@ -259,6 +683,7 @@ int efipci_open ( EFI_HANDLE device, UINT32 attributes, efi_handle_name ( device ), strerror ( rc ) ); goto err_open_protocol; } + efipci->io = pci_io.pci_io; /* Get PCI bus:dev.fn address */ if ( ( efirc = pci_io.pci_io->GetLocation ( pci_io.pci_io, &pci_segment, @@ -270,9 +695,10 @@ int efipci_open ( EFI_HANDLE device, UINT32 attributes, goto err_get_location; } busdevfn = PCI_BUSDEVFN ( pci_segment, pci_bus, pci_dev, pci_fn ); - pci_init ( pci, busdevfn ); + pci_init ( &efipci->pci, busdevfn ); + dma_init ( &efipci->pci.dma, &efipci_dma_operations ); DBGCP ( device, "EFIPCI " PCI_FMT " is %s\n", - PCI_ARGS ( pci ), efi_handle_name ( device ) ); + PCI_ARGS ( &efipci->pci ), efi_handle_name ( device ) ); /* Try to enable I/O cycles, memory cycles, and bus mastering. 
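/* Illustrative sketch (not part of this patch): the 64-bit test used by
 * efipci_dma_set_mask() above.  An all-ones addressable-space mask
 * wraps to zero when incremented, which is what selects enabling
 * (rather than disabling) dual address cycles.
 */
#include <stdint.h>
#include <assert.h>

static int is_64bit_mask ( uint64_t mask ) {
	return ( ( mask + 1 ) == 0 );
}

int main ( void ) {
	assert ( is_64bit_mask ( UINT64_MAX ) );
	assert ( ! is_64bit_mask ( 0xffffffffULL ) );	/* 32-bit mask */
	return 0;
}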
* Some platforms will 'helpfully' report errors if these bits @@ -291,10 +717,10 @@ int efipci_open ( EFI_HANDLE device, UINT32 attributes, EFI_PCI_IO_ATTRIBUTE_BUS_MASTER, NULL ); /* Populate PCI device */ - if ( ( rc = pci_read_config ( pci ) ) != 0 ) { + if ( ( rc = pci_read_config ( &efipci->pci ) ) != 0 ) { DBGC ( device, "EFIPCI " PCI_FMT " cannot read PCI " "configuration: %s\n", - PCI_ARGS ( pci ), strerror ( rc ) ); + PCI_ARGS ( &efipci->pci ), strerror ( rc ) ); goto err_pci_read_config; } @@ -324,15 +750,15 @@ void efipci_close ( EFI_HANDLE device ) { * Get EFI PCI device information * * @v device EFI device handle - * @v pci PCI device to fill in + * @v efipci EFI PCI device to fill in * @ret rc Return status code */ -int efipci_info ( EFI_HANDLE device, struct pci_device *pci ) { +int efipci_info ( EFI_HANDLE device, struct efi_pci_device *efipci ) { int rc; /* Open PCI device, if possible */ if ( ( rc = efipci_open ( device, EFI_OPEN_PROTOCOL_GET_PROTOCOL, - pci ) ) != 0 ) + efipci ) ) != 0 ) return rc; /* Close PCI device */ @@ -355,23 +781,24 @@ int efipci_info ( EFI_HANDLE device, struct pci_device *pci ) { * @ret rc Return status code */ static int efipci_supported ( EFI_HANDLE device ) { - struct pci_device pci; + struct efi_pci_device efipci; int rc; /* Get PCI device information */ - if ( ( rc = efipci_info ( device, &pci ) ) != 0 ) + if ( ( rc = efipci_info ( device, &efipci ) ) != 0 ) return rc; /* Look for a driver */ - if ( ( rc = pci_find_driver ( &pci ) ) != 0 ) { + if ( ( rc = pci_find_driver ( &efipci.pci ) ) != 0 ) { DBGC ( device, "EFIPCI " PCI_FMT " (%04x:%04x class %06x) " - "has no driver\n", PCI_ARGS ( &pci ), pci.vendor, - pci.device, pci.class ); + "has no driver\n", PCI_ARGS ( &efipci.pci ), + efipci.pci.vendor, efipci.pci.device, + efipci.pci.class ); return rc; } DBGC ( device, "EFIPCI " PCI_FMT " (%04x:%04x class %06x) has driver " - "\"%s\"\n", PCI_ARGS ( &pci ), pci.vendor, pci.device, - pci.class, pci.id->name ); + "\"%s\"\n", PCI_ARGS ( &efipci.pci ), efipci.pci.vendor, + efipci.pci.device, efipci.pci.class, efipci.pci.id->name ); return 0; } @@ -384,12 +811,12 @@ static int efipci_supported ( EFI_HANDLE device ) { */ static int efipci_start ( struct efi_device *efidev ) { EFI_HANDLE device = efidev->device; - struct pci_device *pci; + struct efi_pci_device *efipci; int rc; /* Allocate PCI device */ - pci = zalloc ( sizeof ( *pci ) ); - if ( ! pci ) { + efipci = zalloc ( sizeof ( *efipci ) ); + if ( ! 
efipci ) { rc = -ENOMEM; goto err_alloc; } @@ -397,7 +824,7 @@ static int efipci_start ( struct efi_device *efidev ) { /* Open PCI device */ if ( ( rc = efipci_open ( device, ( EFI_OPEN_PROTOCOL_BY_DRIVER | EFI_OPEN_PROTOCOL_EXCLUSIVE ), - pci ) ) != 0 ) { + efipci ) ) != 0 ) { DBGC ( device, "EFIPCI %s could not open PCI device: %s\n", efi_handle_name ( device ), strerror ( rc ) ); DBGC_EFI_OPENERS ( device, device, &efi_pci_io_protocol_guid ); @@ -405,36 +832,36 @@ static int efipci_start ( struct efi_device *efidev ) { } /* Find driver */ - if ( ( rc = pci_find_driver ( pci ) ) != 0 ) { + if ( ( rc = pci_find_driver ( &efipci->pci ) ) != 0 ) { DBGC ( device, "EFIPCI " PCI_FMT " has no driver\n", - PCI_ARGS ( pci ) ); + PCI_ARGS ( &efipci->pci ) ); goto err_find_driver; } /* Mark PCI device as a child of the EFI device */ - pci->dev.parent = &efidev->dev; - list_add ( &pci->dev.siblings, &efidev->dev.children ); + efipci->pci.dev.parent = &efidev->dev; + list_add ( &efipci->pci.dev.siblings, &efidev->dev.children ); /* Probe driver */ - if ( ( rc = pci_probe ( pci ) ) != 0 ) { + if ( ( rc = pci_probe ( &efipci->pci ) ) != 0 ) { DBGC ( device, "EFIPCI " PCI_FMT " could not probe driver " - "\"%s\": %s\n", PCI_ARGS ( pci ), pci->id->name, - strerror ( rc ) ); + "\"%s\": %s\n", PCI_ARGS ( &efipci->pci ), + efipci->pci.id->name, strerror ( rc ) ); goto err_probe; } DBGC ( device, "EFIPCI " PCI_FMT " using driver \"%s\"\n", - PCI_ARGS ( pci ), pci->id->name ); + PCI_ARGS ( &efipci->pci ), efipci->pci.id->name ); - efidev_set_drvdata ( efidev, pci ); + efidev_set_drvdata ( efidev, efipci ); return 0; - pci_remove ( pci ); + pci_remove ( &efipci->pci ); err_probe: - list_del ( &pci->dev.siblings ); + list_del ( &efipci->pci.dev.siblings ); err_find_driver: efipci_close ( device ); err_open: - free ( pci ); + free ( efipci ); err_alloc: return rc; } @@ -445,13 +872,15 @@ static int efipci_start ( struct efi_device *efidev ) { * @v efidev EFI device */ static void efipci_stop ( struct efi_device *efidev ) { - struct pci_device *pci = efidev_get_drvdata ( efidev ); + struct efi_pci_device *efipci = efidev_get_drvdata ( efidev ); EFI_HANDLE device = efidev->device; - pci_remove ( pci ); - list_del ( &pci->dev.siblings ); + pci_remove ( &efipci->pci ); + list_del ( &efipci->pci.dev.siblings ); + assert ( efipci->pci.dma.mapped == 0 ); + assert ( efipci->pci.dma.allocated == 0 ); efipci_close ( device ); - free ( pci ); + free ( efipci ); } /** EFI PCI driver */ diff --git a/src/interface/efi/efi_pxe.c b/src/interface/efi/efi_pxe.c index a1f81df59..15224a5e4 100644 --- a/src/interface/efi/efi_pxe.c +++ b/src/interface/efi/efi_pxe.c @@ -41,6 +41,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include #include #include #include @@ -755,7 +756,8 @@ static EFI_STATUS EFIAPI efi_pxe_start ( EFI_PXE_BASE_CODE_PROTOCOL *base, sa_family_t family = ( use_ipv6 ? AF_INET6 : AF_INET ); int rc; - DBGC ( pxe, "PXE %s START %s\n", pxe->name, ( ipv6 ? "IPv6" : "IPv4" )); + DBGC ( pxe, "PXE %s START %s\n", + pxe->name, ( use_ipv6 ? 
"IPv6" : "IPv4" ) ); /* Initialise mode structure */ memset ( mode, 0, sizeof ( *mode ) ); @@ -1591,6 +1593,7 @@ int efi_pxe_install ( EFI_HANDLE handle, struct net_device *netdev ) { struct efi_pxe *pxe; struct in_addr ip; BOOLEAN use_ipv6; + int leak = 0; EFI_STATUS efirc; int rc; @@ -1643,14 +1646,23 @@ int efi_pxe_install ( EFI_HANDLE handle, struct net_device *netdev ) { pxe->name, efi_handle_name ( handle ) ); return 0; - bs->UninstallMultipleProtocolInterfaces ( + if ( ( efirc = bs->UninstallMultipleProtocolInterfaces ( handle, &efi_pxe_base_code_protocol_guid, &pxe->base, &efi_apple_net_boot_protocol_guid, &pxe->apple, - NULL ); + NULL ) ) != 0 ) { + DBGC ( pxe, "PXE %s could not uninstall: %s\n", + pxe->name, strerror ( -EEFI ( efirc ) ) ); + leak = 1; + } + efi_nullify_pxe ( &pxe->base ); + efi_nullify_apple ( &pxe->apple ); err_install_protocol: - ref_put ( &pxe->refcnt ); + if ( ! leak ) + ref_put ( &pxe->refcnt ); err_alloc: + if ( leak ) + DBGC ( pxe, "PXE %s nullified and leaked\n", pxe->name ); return rc; } @@ -1662,6 +1674,8 @@ int efi_pxe_install ( EFI_HANDLE handle, struct net_device *netdev ) { void efi_pxe_uninstall ( EFI_HANDLE handle ) { EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct efi_pxe *pxe; + int leak = efi_shutdown_in_progress; + EFI_STATUS efirc; /* Locate PXE base code */ pxe = efi_pxe_find ( handle ); @@ -1675,13 +1689,25 @@ void efi_pxe_uninstall ( EFI_HANDLE handle ) { efi_pxe_stop ( &pxe->base ); /* Uninstall PXE base code protocol */ - bs->UninstallMultipleProtocolInterfaces ( + if ( ( ! efi_shutdown_in_progress ) && + ( ( efirc = bs->UninstallMultipleProtocolInterfaces ( handle, &efi_pxe_base_code_protocol_guid, &pxe->base, &efi_apple_net_boot_protocol_guid, &pxe->apple, - NULL ); + NULL ) ) != 0 ) ) { + DBGC ( pxe, "PXE %s could not uninstall: %s\n", + pxe->name, strerror ( -EEFI ( efirc ) ) ); + leak = 1; + } + efi_nullify_pxe ( &pxe->base ); + efi_nullify_apple ( &pxe->apple ); /* Remove from list and drop list's reference */ list_del ( &pxe->list ); - ref_put ( &pxe->refcnt ); + if ( ! leak ) + ref_put ( &pxe->refcnt ); + + /* Report leakage, if applicable */ + if ( leak && ( ! efi_shutdown_in_progress ) ) + DBGC ( pxe, "PXE %s nullified and leaked\n", pxe->name ); } diff --git a/src/interface/efi/efi_smbios.c b/src/interface/efi/efi_smbios.c index 304f95a56..d7877b0aa 100644 --- a/src/interface/efi/efi_smbios.c +++ b/src/interface/efi/efi_smbios.c @@ -34,6 +34,10 @@ FILE_LICENCE ( GPL2_OR_LATER ); static struct smbios_entry *smbios_entry; EFI_USE_TABLE ( SMBIOS_TABLE, &smbios_entry, 0 ); +/** SMBIOS configuration table */ +static struct smbios3_entry *smbios3_entry; +EFI_USE_TABLE ( SMBIOS3_TABLE, &smbios3_entry, 0 ); + /** * Find SMBIOS * @@ -42,26 +46,34 @@ EFI_USE_TABLE ( SMBIOS_TABLE, &smbios_entry, 0 ); */ static int efi_find_smbios ( struct smbios *smbios ) { - if ( ! 
smbios_entry ) { - DBG ( "No SMBIOS table provided\n" ); - return -ENODEV; + /* Use 64-bit table if present */ + if ( smbios3_entry && ( smbios3_entry->signature == SMBIOS3_SIGNATURE ) ) { + smbios->address = phys_to_user ( smbios3_entry->smbios_address ); + smbios->len = smbios3_entry->smbios_len; + smbios->count = 0; + smbios->version = + SMBIOS_VERSION ( smbios3_entry->major, smbios3_entry->minor ); + DBG ( "Found 64-bit SMBIOS v%d.%d entry point at %p (%lx+%zx)\n", + smbios3_entry->major, smbios3_entry->minor, smbios3_entry, + user_to_phys ( smbios->address, 0 ), smbios->len ); + return 0; } - if ( smbios_entry->signature != SMBIOS_SIGNATURE ) { - DBG ( "Invalid SMBIOS signature\n" ); - return -ENODEV; + /* Otherwise, use 32-bit table if present */ + if ( smbios_entry && ( smbios_entry->signature == SMBIOS_SIGNATURE ) ) { + smbios->address = phys_to_user ( smbios_entry->smbios_address ); + smbios->len = smbios_entry->smbios_len; + smbios->count = smbios_entry->smbios_count; + smbios->version = + SMBIOS_VERSION ( smbios_entry->major, smbios_entry->minor ); + DBG ( "Found 32-bit SMBIOS v%d.%d entry point at %p (%lx+%zx)\n", + smbios_entry->major, smbios_entry->minor, smbios_entry, + user_to_phys ( smbios->address, 0 ), smbios->len ); + return 0; } - smbios->address = phys_to_user ( smbios_entry->smbios_address ); - smbios->len = smbios_entry->smbios_len; - smbios->count = smbios_entry->smbios_count; - smbios->version = - SMBIOS_VERSION ( smbios_entry->major, smbios_entry->minor ); - DBG ( "Found SMBIOS v%d.%d entry point at %p (%x+%zx)\n", - smbios_entry->major, smbios_entry->minor, smbios_entry, - smbios_entry->smbios_address, smbios->len ); - - return 0; + DBG ( "No SMBIOS table provided\n" ); + return -ENODEV; } PROVIDE_SMBIOS ( efi, find_smbios, efi_find_smbios ); diff --git a/src/interface/efi/efi_snp.c b/src/interface/efi/efi_snp.c index d648700f6..6649eb1b0 100644 --- a/src/interface/efi/efi_snp.c +++ b/src/interface/efi/efi_snp.c @@ -33,8 +33,10 @@ FILE_LICENCE ( GPL2_OR_LATER ); #include #include #include +#include #include #include +#include #include #include #include @@ -46,7 +48,7 @@ static LIST_HEAD ( efi_snp_devices ); static int efi_snp_claimed; /** TPL prior to network devices being claimed */ -static EFI_TPL efi_snp_old_tpl; +static struct efi_saved_tpl efi_snp_saved_tpl; /* Downgrade user experience if configured to do so * @@ -190,9 +192,11 @@ efi_snp_start ( EFI_SIMPLE_NETWORK_PROTOCOL *snp ) { DBGC ( snpdev, "SNPDEV %p START\n", snpdev ); - /* Fail if net device is currently claimed for use by iPXE */ - if ( efi_snp_claimed ) - return EFI_NOT_READY; + /* Allow start even if net device is currently claimed by iPXE */ + if ( efi_snp_claimed ) { + DBGC ( snpdev, "SNPDEV %p allowing start while claimed\n", + snpdev ); + } snpdev->started = 1; efi_snp_set_state ( snpdev ); @@ -233,24 +237,29 @@ efi_snp_stop ( EFI_SIMPLE_NETWORK_PROTOCOL *snp ) { static EFI_STATUS EFIAPI efi_snp_initialize ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, UINTN extra_rx_bufsize, UINTN extra_tx_bufsize ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct efi_snp_device *snpdev = container_of ( snp, struct efi_snp_device, snp ); - EFI_TPL saved_tpl; + struct efi_saved_tpl tpl; int rc; DBGC ( snpdev, "SNPDEV %p INITIALIZE (%ld extra RX, %ld extra TX)\n", snpdev, ( ( unsigned long ) extra_rx_bufsize ), ( ( unsigned long ) extra_tx_bufsize ) ); - /* Fail if net device is currently claimed for use by iPXE */ + /* Do nothing if net device is currently claimed for use by + * iPXE. 
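/* Illustrative sketch (not part of this patch): the entry point
 * preference implemented in efi_find_smbios() above, restated with
 * stand-in flags in place of the real configuration-table pointers and
 * anchor-string checks.  The 64-bit SMBIOS3 table wins whenever it is
 * both present and correctly signed; the 32-bit table is only a
 * fallback.
 */
#include <stdint.h>
#include <stddef.h>

struct entry_point {
	int present;		/* configuration table was published */
	int signature_ok;	/* anchor string matched */
	uint64_t address;	/* structure table address */
	size_t len;		/* structure table length */
};

/* Return the structure table address to use, or 0 if neither entry
 * point is usable.
 */
static uint64_t select_smbios ( const struct entry_point *smbios3,
				const struct entry_point *smbios ) {
	if ( smbios3->present && smbios3->signature_ok )
		return smbios3->address;
	if ( smbios->present && smbios->signature_ok )
		return smbios->address;
	return 0;
}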
Do not return an error, because this will cause + * MnpDxe et al to fail to install the relevant child handles + * and to leave behind a partially initialised device handle + * that can cause a later system crash. + */ if ( efi_snp_claimed ) { - rc = -EAGAIN; - goto err_claimed; + DBGC ( snpdev, "SNPDEV %p ignoring initialization while " + "claimed\n", snpdev ); + return 0; } /* Raise TPL */ - saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + efi_raise_tpl ( &tpl ); /* Open network device */ if ( ( rc = netdev_open ( snpdev->netdev ) ) != 0 ) { @@ -261,8 +270,7 @@ efi_snp_initialize ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, efi_snp_set_state ( snpdev ); err_open: - bs->RestoreTPL ( saved_tpl ); - err_claimed: + efi_restore_tpl ( &tpl ); return EFIRC ( rc ); } @@ -275,10 +283,9 @@ efi_snp_initialize ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, */ static EFI_STATUS EFIAPI efi_snp_reset ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, BOOLEAN ext_verify ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct efi_snp_device *snpdev = container_of ( snp, struct efi_snp_device, snp ); - EFI_TPL saved_tpl; + struct efi_saved_tpl tpl; int rc; DBGC ( snpdev, "SNPDEV %p RESET (%s extended verification)\n", @@ -291,7 +298,7 @@ efi_snp_reset ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, BOOLEAN ext_verify ) { } /* Raise TPL */ - saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + efi_raise_tpl ( &tpl ); /* Close network device */ netdev_close ( snpdev->netdev ); @@ -307,7 +314,7 @@ efi_snp_reset ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, BOOLEAN ext_verify ) { efi_snp_set_state ( snpdev ); err_open: - bs->RestoreTPL ( saved_tpl ); + efi_restore_tpl ( &tpl ); err_claimed: return EFIRC ( rc ); } @@ -320,10 +327,9 @@ efi_snp_reset ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, BOOLEAN ext_verify ) { */ static EFI_STATUS EFIAPI efi_snp_shutdown ( EFI_SIMPLE_NETWORK_PROTOCOL *snp ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct efi_snp_device *snpdev = container_of ( snp, struct efi_snp_device, snp ); - EFI_TPL saved_tpl; + struct efi_saved_tpl tpl; DBGC ( snpdev, "SNPDEV %p SHUTDOWN\n", snpdev ); @@ -332,7 +338,7 @@ efi_snp_shutdown ( EFI_SIMPLE_NETWORK_PROTOCOL *snp ) { return EFI_NOT_READY; /* Raise TPL */ - saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + efi_raise_tpl ( &tpl ); /* Close network device */ netdev_close ( snpdev->netdev ); @@ -340,7 +346,7 @@ efi_snp_shutdown ( EFI_SIMPLE_NETWORK_PROTOCOL *snp ) { efi_snp_flush ( snpdev ); /* Restore TPL */ - bs->RestoreTPL ( saved_tpl ); + efi_restore_tpl ( &tpl ); return 0; } @@ -544,10 +550,9 @@ efi_snp_nvdata ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, BOOLEAN read, static EFI_STATUS EFIAPI efi_snp_get_status ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, UINT32 *interrupts, VOID **txbuf ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct efi_snp_device *snpdev = container_of ( snp, struct efi_snp_device, snp ); - EFI_TPL saved_tpl; + struct efi_saved_tpl tpl; DBGC2 ( snpdev, "SNPDEV %p GET_STATUS", snpdev ); @@ -558,7 +563,7 @@ efi_snp_get_status ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, } /* Raise TPL */ - saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + efi_raise_tpl ( &tpl ); /* Poll the network device */ efi_snp_poll ( snpdev ); @@ -583,7 +588,7 @@ efi_snp_get_status ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, } /* Restore TPL */ - bs->RestoreTPL ( saved_tpl ); + efi_restore_tpl ( &tpl ); DBGC2 ( snpdev, "\n" ); return 0; @@ -606,14 +611,13 @@ efi_snp_transmit ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, UINTN ll_header_len, UINTN len, VOID *data, EFI_MAC_ADDRESS *ll_src, EFI_MAC_ADDRESS *ll_dest, UINT16 *net_proto ) { - 
EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct efi_snp_device *snpdev = container_of ( snp, struct efi_snp_device, snp ); struct ll_protocol *ll_protocol = snpdev->netdev->ll_protocol; + struct efi_saved_tpl tpl; struct io_buffer *iobuf; size_t payload_len; unsigned int tx_fill; - EFI_TPL saved_tpl; int rc; DBGC2 ( snpdev, "SNPDEV %p TRANSMIT %p+%lx", snpdev, data, @@ -640,7 +644,7 @@ efi_snp_transmit ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, } /* Raise TPL */ - saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + efi_raise_tpl ( &tpl ); /* Sanity checks */ if ( ll_header_len ) { @@ -725,7 +729,7 @@ efi_snp_transmit ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, snpdev->interrupts |= EFI_SIMPLE_NETWORK_TRANSMIT_INTERRUPT; /* Restore TPL */ - bs->RestoreTPL ( saved_tpl ); + efi_restore_tpl ( &tpl ); return 0; @@ -735,7 +739,7 @@ efi_snp_transmit ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, free_iob ( iobuf ); err_alloc_iob: err_sanity: - bs->RestoreTPL ( saved_tpl ); + efi_restore_tpl ( &tpl ); err_claimed: return EFIRC ( rc ); } @@ -757,17 +761,16 @@ efi_snp_receive ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, UINTN *ll_header_len, UINTN *len, VOID *data, EFI_MAC_ADDRESS *ll_src, EFI_MAC_ADDRESS *ll_dest, UINT16 *net_proto ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct efi_snp_device *snpdev = container_of ( snp, struct efi_snp_device, snp ); struct ll_protocol *ll_protocol = snpdev->netdev->ll_protocol; + struct efi_saved_tpl tpl; struct io_buffer *iobuf; const void *iob_ll_dest; const void *iob_ll_src; uint16_t iob_net_proto; unsigned int iob_flags; size_t copy_len; - EFI_TPL saved_tpl; int rc; DBGC2 ( snpdev, "SNPDEV %p RECEIVE %p(+%lx)", snpdev, data, @@ -780,7 +783,7 @@ efi_snp_receive ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, } /* Raise TPL */ - saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + efi_raise_tpl ( &tpl ); /* Poll the network device */ efi_snp_poll ( snpdev ); @@ -829,7 +832,7 @@ efi_snp_receive ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, out_bad_ll_header: free_iob ( iobuf ); out_no_packet: - bs->RestoreTPL ( saved_tpl ); + efi_restore_tpl ( &tpl ); err_claimed: return EFIRC ( rc ); } @@ -842,9 +845,8 @@ efi_snp_receive ( EFI_SIMPLE_NETWORK_PROTOCOL *snp, */ static VOID EFIAPI efi_snp_wait_for_packet ( EFI_EVENT event __unused, VOID *context ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct efi_snp_device *snpdev = context; - EFI_TPL saved_tpl; + struct efi_saved_tpl tpl; DBGCP ( snpdev, "SNPDEV %p WAIT_FOR_PACKET\n", snpdev ); @@ -857,13 +859,13 @@ static VOID EFIAPI efi_snp_wait_for_packet ( EFI_EVENT event __unused, return; /* Raise TPL */ - saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + efi_raise_tpl ( &tpl ); /* Poll the network device */ efi_snp_poll ( snpdev ); /* Restore TPL */ - bs->RestoreTPL ( saved_tpl ); + efi_restore_tpl ( &tpl ); } /** SNP interface */ @@ -1623,13 +1625,9 @@ static int efi_snp_probe ( struct net_device *netdev ) { EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct efi_device *efidev; struct efi_snp_device *snpdev; - EFI_DEVICE_PATH_PROTOCOL *path_end; - MAC_ADDR_DEVICE_PATH *macpath; - VLAN_DEVICE_PATH *vlanpath; - size_t path_prefix_len = 0; unsigned int ifcnt; - unsigned int tag; void *interface; + int leak = 0; EFI_STATUS efirc; int rc; @@ -1713,41 +1711,13 @@ static int efi_snp_probe ( struct net_device *netdev ) { sizeof ( snpdev->name[0] ) ), "%s", netdev->name ); - /* Allocate the new device path */ - path_prefix_len = efi_devpath_len ( efidev->path ); - snpdev->path = zalloc ( path_prefix_len + sizeof ( *macpath ) + - sizeof ( *vlanpath ) + sizeof ( 
*path_end ) ); + /* Construct device path */ + snpdev->path = efi_netdev_path ( netdev ); if ( ! snpdev->path ) { rc = -ENOMEM; - goto err_alloc_device_path; + goto err_path; } - /* Populate the device path */ - memcpy ( snpdev->path, efidev->path, path_prefix_len ); - macpath = ( ( ( void * ) snpdev->path ) + path_prefix_len ); - memset ( macpath, 0, sizeof ( *macpath ) ); - macpath->Header.Type = MESSAGING_DEVICE_PATH; - macpath->Header.SubType = MSG_MAC_ADDR_DP; - macpath->Header.Length[0] = sizeof ( *macpath ); - memcpy ( &macpath->MacAddress, netdev->ll_addr, - netdev->ll_protocol->ll_addr_len ); - macpath->IfType = ntohs ( netdev->ll_protocol->ll_proto ); - if ( ( tag = vlan_tag ( netdev ) ) ) { - vlanpath = ( ( ( void * ) macpath ) + sizeof ( *macpath ) ); - memset ( vlanpath, 0, sizeof ( *vlanpath ) ); - vlanpath->Header.Type = MESSAGING_DEVICE_PATH; - vlanpath->Header.SubType = MSG_VLAN_DP; - vlanpath->Header.Length[0] = sizeof ( *vlanpath ); - vlanpath->VlanId = tag; - path_end = ( ( ( void * ) vlanpath ) + sizeof ( *vlanpath ) ); - } else { - path_end = ( ( ( void * ) macpath ) + sizeof ( *macpath ) ); - } - memset ( path_end, 0, sizeof ( *path_end ) ); - path_end->Type = END_DEVICE_PATH_TYPE; - path_end->SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; - path_end->Length[0] = sizeof ( *path_end ); - /* Install the SNP */ if ( ( efirc = bs->InstallMultipleProtocolInterfaces ( &snpdev->handle, @@ -1826,7 +1796,7 @@ static int efi_snp_probe ( struct net_device *netdev ) { list_del ( &snpdev->list ); if ( snpdev->package_list ) - efi_snp_hii_uninstall ( snpdev ); + leak |= efi_snp_hii_uninstall ( snpdev ); efi_child_del ( efidev->device, snpdev->handle ); err_efi_child_add: bs->CloseProtocol ( snpdev->handle, &efi_nii31_protocol_guid, @@ -1835,7 +1805,7 @@ static int efi_snp_probe ( struct net_device *netdev ) { bs->CloseProtocol ( snpdev->handle, &efi_nii_protocol_guid, efi_image_handle, snpdev->handle ); err_open_nii: - bs->UninstallMultipleProtocolInterfaces ( + if ( ( efirc = bs->UninstallMultipleProtocolInterfaces ( snpdev->handle, &efi_simple_network_protocol_guid, &snpdev->snp, &efi_device_path_protocol_guid, snpdev->path, @@ -1843,17 +1813,30 @@ static int efi_snp_probe ( struct net_device *netdev ) { &efi_nii31_protocol_guid, &snpdev->nii, &efi_component_name2_protocol_guid, &snpdev->name2, &efi_load_file_protocol_guid, &snpdev->load_file, - NULL ); + NULL ) ) != 0 ) { + DBGC ( snpdev, "SNPDEV %p could not uninstall: %s\n", + snpdev, strerror ( -EEFI ( efirc ) ) ); + leak = 1; + } + efi_nullify_snp ( &snpdev->snp ); + efi_nullify_nii ( &snpdev->nii ); + efi_nullify_name2 ( &snpdev->name2 ); + efi_nullify_load_file ( &snpdev->load_file ); err_install_protocol_interface: - free ( snpdev->path ); - err_alloc_device_path: + if ( ! leak ) + free ( snpdev->path ); + err_path: bs->CloseEvent ( snpdev->snp.WaitForPacket ); err_create_event: err_ll_addr_len: - netdev_put ( netdev ); - free ( snpdev ); + if ( ! 
leak ) { + netdev_put ( netdev ); + free ( snpdev ); + } err_alloc_snp: err_no_efidev: + if ( leak ) + DBGC ( snpdev, "SNPDEV %p nullified and leaked\n", snpdev ); return rc; } @@ -1890,6 +1873,8 @@ static void efi_snp_notify ( struct net_device *netdev ) { static void efi_snp_remove ( struct net_device *netdev ) { EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct efi_snp_device *snpdev; + int leak = efi_shutdown_in_progress; + EFI_STATUS efirc; /* Locate SNP device */ snpdev = efi_snp_demux ( netdev ); @@ -1901,13 +1886,14 @@ static void efi_snp_remove ( struct net_device *netdev ) { /* Uninstall the SNP */ list_del ( &snpdev->list ); if ( snpdev->package_list ) - efi_snp_hii_uninstall ( snpdev ); + leak |= efi_snp_hii_uninstall ( snpdev ); efi_child_del ( snpdev->efidev->device, snpdev->handle ); bs->CloseProtocol ( snpdev->handle, &efi_nii_protocol_guid, efi_image_handle, snpdev->handle ); bs->CloseProtocol ( snpdev->handle, &efi_nii31_protocol_guid, efi_image_handle, snpdev->handle ); - bs->UninstallMultipleProtocolInterfaces ( + if ( ( ! efi_shutdown_in_progress ) && + ( ( efirc = bs->UninstallMultipleProtocolInterfaces ( snpdev->handle, &efi_simple_network_protocol_guid, &snpdev->snp, &efi_device_path_protocol_guid, snpdev->path, @@ -1915,11 +1901,26 @@ static void efi_snp_remove ( struct net_device *netdev ) { &efi_nii31_protocol_guid, &snpdev->nii, &efi_component_name2_protocol_guid, &snpdev->name2, &efi_load_file_protocol_guid, &snpdev->load_file, - NULL ); - free ( snpdev->path ); + NULL ) ) != 0 ) ) { + DBGC ( snpdev, "SNPDEV %p could not uninstall: %s\n", + snpdev, strerror ( -EEFI ( efirc ) ) ); + leak = 1; + } + efi_nullify_snp ( &snpdev->snp ); + efi_nullify_nii ( &snpdev->nii ); + efi_nullify_name2 ( &snpdev->name2 ); + efi_nullify_load_file ( &snpdev->load_file ); + if ( ! leak ) + free ( snpdev->path ); bs->CloseEvent ( snpdev->snp.WaitForPacket ); - netdev_put ( snpdev->netdev ); - free ( snpdev ); + if ( ! leak ) { + netdev_put ( snpdev->netdev ); + free ( snpdev ); + } + + /* Report leakage, if applicable */ + if ( leak && ( ! efi_shutdown_in_progress ) ) + DBGC ( snpdev, "SNPDEV %p nullified and leaked\n", snpdev ); } /** SNP driver */ @@ -1967,12 +1968,11 @@ struct efi_snp_device * last_opened_snpdev ( void ) { * @v delta Claim count change */ void efi_snp_add_claim ( int delta ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct efi_snp_device *snpdev; /* Raise TPL if we are about to claim devices */ if ( ! efi_snp_claimed ) - efi_snp_old_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + efi_raise_tpl ( &efi_snp_saved_tpl ); /* Claim SNP devices */ efi_snp_claimed += delta; @@ -1984,5 +1984,5 @@ void efi_snp_add_claim ( int delta ) { /* Restore TPL if we have released devices */ if ( ! efi_snp_claimed ) - bs->RestoreTPL ( efi_snp_old_tpl ); + efi_restore_tpl ( &efi_snp_saved_tpl ); } diff --git a/src/interface/efi/efi_snp_hii.c b/src/interface/efi/efi_snp_hii.c index 651bef040..5d5f80cd7 100644 --- a/src/interface/efi/efi_snp_hii.c +++ b/src/interface/efi/efi_snp_hii.c @@ -63,7 +63,9 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include #include +#include #include /** EFI platform setup formset GUID */ @@ -247,16 +249,17 @@ static int efi_snp_hii_append ( struct efi_snp_device *snpdev __unused, const char *key, const char *value, wchar_t **results ) { EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + EFI_STATUS efirc; size_t len; void *new; /* Allocate new string */ len = ( ( *results ? 
( wcslen ( *results ) + 1 /* "&" */ ) : 0 ) + strlen ( key ) + 1 /* "=" */ + strlen ( value ) + 1 /* NUL */ ); - bs->AllocatePool ( EfiBootServicesData, ( len * sizeof ( wchar_t ) ), - &new ); - if ( ! new ) - return -ENOMEM; + if ( ( efirc = bs->AllocatePool ( EfiBootServicesData, + ( len * sizeof ( wchar_t ) ), + &new ) ) != 0 ) + return -EEFI ( efirc ); /* Populate string */ efi_snprintf ( new, len, "%ls%s%s=%s", ( *results ? *results : L"" ), @@ -657,7 +660,8 @@ int efi_snp_hii_install ( struct efi_snp_device *snpdev ) { VENDOR_DEVICE_PATH *vendor_path; EFI_DEVICE_PATH_PROTOCOL *path_end; size_t path_prefix_len; - int efirc; + int leak = 0; + EFI_STATUS efirc; int rc; /* Do nothing if HII database protocol is not supported */ @@ -679,7 +683,7 @@ int efi_snp_hii_install ( struct efi_snp_device *snpdev ) { } /* Allocate the new device path */ - path_prefix_len = efi_devpath_len ( snpdev->path ); + path_prefix_len = efi_path_len ( snpdev->path ); snpdev->hii_child_path = zalloc ( path_prefix_len + sizeof ( *vendor_path ) + sizeof ( *path_end ) ); @@ -749,23 +753,37 @@ int efi_snp_hii_install ( struct efi_snp_device *snpdev ) { efi_child_del ( snpdev->handle, snpdev->hii_child_handle ); err_efi_child_add: - bs->UninstallMultipleProtocolInterfaces ( + if ( ( efirc = bs->UninstallMultipleProtocolInterfaces ( snpdev->hii_child_handle, &efi_hii_config_access_protocol_guid, &snpdev->hii, - NULL ); + NULL ) ) != 0 ) { + DBGC ( snpdev, "SNPDEV %p could not uninstall HII protocol: " + "%s\n", snpdev, strerror ( -EEFI ( efirc ) ) ); + leak = 1; + } + efi_nullify_hii ( &snpdev->hii ); err_install_protocol: - efihii->RemovePackageList ( efihii, snpdev->hii_handle ); + if ( ! leak ) + efihii->RemovePackageList ( efihii, snpdev->hii_handle ); err_new_package_list: - bs->UninstallMultipleProtocolInterfaces ( + if ( ( efirc = bs->UninstallMultipleProtocolInterfaces ( snpdev->hii_child_handle, &efi_device_path_protocol_guid, snpdev->hii_child_path, - NULL ); + NULL ) ) != 0 ) { + DBGC ( snpdev, "SNPDEV %p could not uninstall HII path: %s\n", + snpdev, strerror ( -EEFI ( efirc ) ) ); + leak = 1; + } err_hii_child_handle: - free ( snpdev->hii_child_path ); - snpdev->hii_child_path = NULL; + if ( ! leak ) { + free ( snpdev->hii_child_path ); + snpdev->hii_child_path = NULL; + } err_alloc_child_path: - free ( snpdev->package_list ); - snpdev->package_list = NULL; + if ( ! leak ) { + free ( snpdev->package_list ); + snpdev->package_list = NULL; + } err_build_package_list: err_no_hii: return rc; @@ -775,27 +793,49 @@ int efi_snp_hii_install ( struct efi_snp_device *snpdev ) { * Uninstall HII protocol and package for SNP device * * @v snpdev SNP device + * @ret leak Uninstallation failed: leak memory */ -void efi_snp_hii_uninstall ( struct efi_snp_device *snpdev ) { +int efi_snp_hii_uninstall ( struct efi_snp_device *snpdev ) { EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + int leak = efi_shutdown_in_progress; + EFI_STATUS efirc; /* Do nothing if HII database protocol is not supported */ if ( ! efihii ) - return; + return 0; /* Uninstall protocols and remove package list */ efi_child_del ( snpdev->handle, snpdev->hii_child_handle ); - bs->UninstallMultipleProtocolInterfaces ( + if ( ( ! 
efi_shutdown_in_progress ) && + ( ( efirc = bs->UninstallMultipleProtocolInterfaces ( snpdev->hii_child_handle, &efi_hii_config_access_protocol_guid, &snpdev->hii, - NULL ); - efihii->RemovePackageList ( efihii, snpdev->hii_handle ); - bs->UninstallMultipleProtocolInterfaces ( + NULL ) ) != 0 ) ) { + DBGC ( snpdev, "SNPDEV %p could not uninstall HII protocol: " + "%s\n", snpdev, strerror ( -EEFI ( efirc ) ) ); + leak = 1; + } + efi_nullify_hii ( &snpdev->hii ); + if ( ! leak ) + efihii->RemovePackageList ( efihii, snpdev->hii_handle ); + if ( ( ! efi_shutdown_in_progress ) && + ( ( efirc = bs->UninstallMultipleProtocolInterfaces ( snpdev->hii_child_handle, &efi_device_path_protocol_guid, snpdev->hii_child_path, - NULL ); - free ( snpdev->hii_child_path ); - snpdev->hii_child_path = NULL; - free ( snpdev->package_list ); - snpdev->package_list = NULL; + NULL ) ) != 0 ) ) { + DBGC ( snpdev, "SNPDEV %p could not uninstall HII path: %s\n", + snpdev, strerror ( -EEFI ( efirc ) ) ); + leak = 1; + } + if ( ! leak ) { + free ( snpdev->hii_child_path ); + snpdev->hii_child_path = NULL; + free ( snpdev->package_list ); + snpdev->package_list = NULL; + } + + /* Report leakage, if applicable */ + if ( leak && ( ! efi_shutdown_in_progress ) ) + DBGC ( snpdev, "SNPDEV %p HII nullified and leaked\n", snpdev ); + return leak; } diff --git a/src/interface/efi/efi_timer.c b/src/interface/efi/efi_timer.c index 8f40cb81a..405cd3454 100644 --- a/src/interface/efi/efi_timer.c +++ b/src/interface/efi/efi_timer.c @@ -97,8 +97,17 @@ static unsigned long efi_currticks ( void ) { * gain us any substantive benefits (since even with such * calls we would still be suffering from the limitations of a * polling design), we instead choose to run at TPL_CALLBACK - * almost all of the time, dropping to TPL_APPLICATION to - * allow timer ticks to occur. + * almost all of the time, dropping to a lower TPL to allow + * timer ticks to occur. + * + * We record the external TPL at the point of entry into iPXE, + * and drop back only as far as this external TPL. This + * avoids the unexpected behaviour that may arise from having + * iPXE temporarily drop to TPL_APPLICATION in the middle of + * an entry point invoked at TPL_CALLBACK. The side effect is + * that iPXE's view of the system time is effectively frozen + * for the duration of any call made in to iPXE at + * TPL_CALLBACK or higher. 
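/* Illustrative sketch (not part of this patch): the raise/restore
 * behaviour described in the comment above, modelled with stand-in
 * types and helpers.  The real efi_raise_tpl()/efi_restore_tpl() and
 * efi_external_tpl are defined elsewhere in this series; the names,
 * values and bodies below are assumptions for illustration, and the
 * per-call saved TPL is a simplification of the recorded external TPL.
 */
typedef unsigned long tpl_t;

#define TPL_CALLBACK 8		/* stand-in value */

static tpl_t current_tpl;	/* stand-in for the firmware's TPL */

static tpl_t raise_tpl ( tpl_t new ) {
	tpl_t old = current_tpl;

	if ( new > current_tpl )
		current_tpl = new;
	return old;
}

static void restore_tpl ( tpl_t old ) {
	current_tpl = old;
}

struct saved_tpl {
	tpl_t previous;		/* TPL at the point of entry into iPXE */
};

/* On entry: raise to TPL_CALLBACK, remembering the caller's TPL */
static void efi_enter ( struct saved_tpl *tpl ) {
	tpl->previous = raise_tpl ( TPL_CALLBACK );
}

/* On exit: drop back only as far as the caller's TPL, never below it */
static void efi_leave ( struct saved_tpl *tpl ) {
	restore_tpl ( tpl->previous );
}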
* * * For added excitement, UEFI provides no clean way for device @@ -127,7 +136,7 @@ static unsigned long efi_currticks ( void ) { if ( efi_shutdown_in_progress ) { efi_jiffies++; } else { - bs->RestoreTPL ( TPL_APPLICATION ); + bs->RestoreTPL ( efi_external_tpl ); bs->RaiseTPL ( TPL_CALLBACK ); } diff --git a/src/interface/efi/efi_usb.c b/src/interface/efi/efi_usb.c index 48274f1d6..28dfc8680 100644 --- a/src/interface/efi/efi_usb.c +++ b/src/interface/efi/efi_usb.c @@ -30,8 +30,9 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include +#include #include +#include #include #include @@ -73,13 +74,14 @@ static const char * efi_usb_direction_name ( EFI_USB_DATA_DIRECTION direction ){ static VOID EFIAPI efi_usb_timer ( EFI_EVENT event __unused, VOID *context ) { struct efi_usb_endpoint *usbep = context; - struct usb_bus *bus = usbep->usbintf->usbdev->usb->port->hub->bus; + struct usb_function *func = usbep->usbintf->usbdev->func; /* Poll bus */ - usb_poll ( bus ); + usb_poll ( func->usb->port->hub->bus ); /* Refill endpoint */ - usb_refill ( &usbep->ep ); + if ( usbep->ep.open ) + usb_refill ( &usbep->ep ); } /** @@ -117,6 +119,21 @@ static int efi_usb_mtu ( struct efi_usb_interface *usbintf, return -ENOENT; } +/** + * Check if endpoint is open + * + * @v usbintf EFI USB interface + * @v endpoint Endpoint address + * @ret is_open Endpoint is open + */ +static int efi_usb_is_open ( struct efi_usb_interface *usbintf, + unsigned int endpoint ) { + unsigned int index = USB_ENDPOINT_IDX ( endpoint ); + struct efi_usb_endpoint *usbep = usbintf->endpoint[index]; + + return ( usbep && usbep->ep.open ); +} + /** * Open endpoint * @@ -139,6 +156,22 @@ static int efi_usb_open ( struct efi_usb_interface *usbintf, EFI_STATUS efirc; int rc; + /* Allocate structure, if needed. Once allocated, we leave + * the endpoint structure in place until the device is + * removed, to work around external UEFI code that closes the + * endpoint at illegal times. + */ + usbep = usbintf->endpoint[index]; + if ( ! usbep ) { + usbep = zalloc ( sizeof ( *usbep ) ); + if ( ! usbep ) { + rc = -ENOMEM; + goto err_alloc; + } + usbep->usbintf = usbintf; + usbintf->endpoint[index] = usbep; + } + /* Get endpoint MTU */ mtu = efi_usb_mtu ( usbintf, endpoint ); if ( mtu < 0 ) { @@ -147,13 +180,7 @@ static int efi_usb_open ( struct efi_usb_interface *usbintf, } /* Allocate and initialise structure */ - usbep = zalloc ( sizeof ( *usbep ) ); - if ( ! 
usbep ) { - rc = -ENOMEM; - goto err_alloc; - } - usbep->usbintf = usbintf; - usb_endpoint_init ( &usbep->ep, usbdev->usb, driver ); + usb_endpoint_init ( &usbep->ep, usbdev->func->usb, driver ); usb_endpoint_describe ( &usbep->ep, endpoint, attributes, mtu, 0, ( interval << 3 /* microframes */ ) ); @@ -164,9 +191,6 @@ static int efi_usb_open ( struct efi_usb_interface *usbintf, strerror ( rc ) ); goto err_open; } - - /* Record opened endpoint */ - usbintf->endpoint[index] = usbep; DBGC ( usbdev, "USBDEV %s %s opened\n", usbintf->name, usb_endpoint_name ( &usbep->ep ) ); @@ -185,12 +209,10 @@ static int efi_usb_open ( struct efi_usb_interface *usbintf, bs->CloseEvent ( usbep->event ); err_event: - usbintf->endpoint[index] = usbep; usb_endpoint_close ( &usbep->ep ); err_open: - free ( usbep ); - err_alloc: err_mtu: + err_alloc: return rc; } @@ -216,12 +238,6 @@ static void efi_usb_close ( struct efi_usb_endpoint *usbep ) { usb_endpoint_close ( &usbep->ep ); DBGC ( usbdev, "USBDEV %s %s closed\n", usbintf->name, usb_endpoint_name ( &usbep->ep ) ); - - /* Free endpoint */ - free ( usbep ); - - /* Record closed endpoint */ - usbintf->endpoint[index] = NULL; } /** @@ -236,11 +252,31 @@ static void efi_usb_close_all ( struct efi_usb_interface *usbintf ) { for ( i = 0 ; i < ( sizeof ( usbintf->endpoint ) / sizeof ( usbintf->endpoint[0] ) ) ; i++ ) { usbep = usbintf->endpoint[i]; - if ( usbep ) + if ( usbep && usbep->ep.open ) efi_usb_close ( usbep ); } } +/** + * Free all endpoints + * + * @v usbintf EFI USB interface + */ +static void efi_usb_free_all ( struct efi_usb_interface *usbintf ) { + struct efi_usb_endpoint *usbep; + unsigned int i; + + for ( i = 0 ; i < ( sizeof ( usbintf->endpoint ) / + sizeof ( usbintf->endpoint[0] ) ) ; i++ ) { + usbep = usbintf->endpoint[i]; + if ( usbep ) { + assert ( ! usbep->ep.open ); + free ( usbep ); + usbintf->endpoint[i] = NULL; + } + } +} + /** * Complete synchronous transfer * @@ -286,7 +322,7 @@ static int efi_usb_sync_transfer ( struct efi_usb_interface *usbintf, int rc; /* Open endpoint, if applicable */ - if ( ( ! usbintf->endpoint[index] ) && + if ( ( ! efi_usb_is_open ( usbintf, endpoint ) ) && ( ( rc = efi_usb_open ( usbintf, endpoint, attributes, 0, &efi_usb_sync_driver ) ) != 0 ) ) { goto err_open; @@ -319,7 +355,7 @@ static int efi_usb_sync_transfer ( struct efi_usb_interface *usbintf, for ( i = 0 ; ( ( timeout == 0 ) || ( i < timeout ) ) ; i++ ) { /* Poll bus */ - usb_poll ( usbdev->usb->port->hub->bus ); + usb_poll ( usbdev->func->usb->port->hub->bus ); /* Check for completion */ if ( usbep->rc != -EINPROGRESS ) { @@ -377,15 +413,21 @@ static void efi_usb_async_complete ( struct usb_endpoint *ep, goto drop; /* Construct status */ - status = ( ( rc == 0 ) ? 0 : EFI_USB_ERR_STALL ); + status = ( ( rc == 0 ) ? 
0 : EFI_USB_ERR_SYSTEM ); - /* Report completion */ - usbep->callback ( iobuf->data, iob_len ( iobuf ), usbep->context, - status ); + /* Report completion, if applicable */ + if ( usbep->callback ) { + usbep->callback ( iobuf->data, iob_len ( iobuf ), + usbep->context, status ); + } drop: - /* Recycle I/O buffer */ - usb_recycle ( &usbep->ep, iobuf ); + /* Recycle or free I/O buffer */ + if ( usbep->ep.open ) { + usb_recycle ( &usbep->ep, iobuf ); + } else { + free_iob ( iobuf ); + } } /** Asynchronous endpoint operations */ @@ -416,6 +458,10 @@ static int efi_usb_async_start ( struct efi_usb_interface *usbintf, EFI_STATUS efirc; int rc; + /* Close endpoint, if applicable */ + if ( efi_usb_is_open ( usbintf, endpoint ) ) + efi_usb_close ( usbintf->endpoint[index] ); + /* Open endpoint */ if ( ( rc = efi_usb_open ( usbintf, endpoint, USB_ENDPOINT_ATTR_INTERRUPT, interval, @@ -451,6 +497,8 @@ static int efi_usb_async_start ( struct efi_usb_interface *usbintf, bs->SetTimer ( usbep->event, TimerCancel, 0 ); err_timer: err_prefill: + usbep->callback = NULL; + usbep->context = NULL; efi_usb_close ( usbep ); err_open: return rc; @@ -469,15 +517,16 @@ static void efi_usb_async_stop ( struct efi_usb_interface *usbintf, unsigned int index = USB_ENDPOINT_IDX ( endpoint ); /* Do nothing if endpoint is already closed */ - usbep = usbintf->endpoint[index]; - if ( ! usbep ) + if ( ! efi_usb_is_open ( usbintf, endpoint ) ) return; + usbep = usbintf->endpoint[index]; /* Stop timer */ bs->SetTimer ( usbep->event, TimerCancel, 0 ); - /* Close endpoint */ - efi_usb_close ( usbep ); + /* Clear callback parameters */ + usbep->callback = NULL; + usbep->context = NULL; } /****************************************************************************** @@ -505,7 +554,6 @@ efi_usb_control_transfer ( EFI_USB_IO_PROTOCOL *usbio, EFI_USB_DATA_DIRECTION direction, UINT32 timeout, VOID *data, UINTN len, UINT32 *status ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct efi_usb_interface *usbintf = container_of ( usbio, struct efi_usb_interface, usbio ); struct efi_usb_device *usbdev = usbintf->usbdev; @@ -513,7 +561,7 @@ efi_usb_control_transfer ( EFI_USB_IO_PROTOCOL *usbio, USB_REQUEST_TYPE ( packet->Request ) ); unsigned int value = le16_to_cpu ( packet->Value ); unsigned int index = le16_to_cpu ( packet->Index ); - EFI_TPL saved_tpl; + struct efi_saved_tpl tpl; int rc; DBGC2 ( usbdev, "USBDEV %s control %04x:%04x:%04x:%04x %s %dms " @@ -523,7 +571,7 @@ efi_usb_control_transfer ( EFI_USB_IO_PROTOCOL *usbio, ( ( size_t ) len ) ); /* Raise TPL */ - saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + efi_raise_tpl ( &tpl ); /* Clear status */ *status = 0; @@ -548,14 +596,13 @@ efi_usb_control_transfer ( EFI_USB_IO_PROTOCOL *usbio, efi_usb_close_all ( usbintf ); /* Issue control transfer */ - if ( ( rc = usb_control ( usbdev->usb, request, value, index, + if ( ( rc = usb_control ( usbdev->func->usb, request, value, index, data, len ) ) != 0 ) { DBGC ( usbdev, "USBDEV %s control %04x:%04x:%04x:%04x %p+%zx " "failed: %s\n", usbintf->name, request, value, index, le16_to_cpu ( packet->Length ), data, ( ( size_t ) len ), strerror ( rc ) ); - /* Assume that any error represents a stall */ - *status = EFI_USB_ERR_STALL; + *status = EFI_USB_ERR_SYSTEM; goto err_control; } @@ -568,7 +615,7 @@ efi_usb_control_transfer ( EFI_USB_IO_PROTOCOL *usbio, err_control: err_change_config: - bs->RestoreTPL ( saved_tpl ); + efi_restore_tpl ( &tpl ); return EFIRC ( rc ); } @@ -586,12 +633,11 @@ efi_usb_control_transfer ( EFI_USB_IO_PROTOCOL 
*usbio, static EFI_STATUS EFIAPI efi_usb_bulk_transfer ( EFI_USB_IO_PROTOCOL *usbio, UINT8 endpoint, VOID *data, UINTN *len, UINTN timeout, UINT32 *status ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct efi_usb_interface *usbintf = container_of ( usbio, struct efi_usb_interface, usbio ); struct efi_usb_device *usbdev = usbintf->usbdev; size_t actual = *len; - EFI_TPL saved_tpl; + struct efi_saved_tpl tpl; int rc; DBGC2 ( usbdev, "USBDEV %s bulk %s %p+%zx %dms\n", usbintf->name, @@ -599,7 +645,7 @@ efi_usb_bulk_transfer ( EFI_USB_IO_PROTOCOL *usbio, UINT8 endpoint, VOID *data, ( ( size_t ) *len ), ( ( unsigned int ) timeout ) ); /* Raise TPL */ - saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + efi_raise_tpl ( &tpl ); /* Clear status */ *status = 0; @@ -614,7 +660,7 @@ efi_usb_bulk_transfer ( EFI_USB_IO_PROTOCOL *usbio, UINT8 endpoint, VOID *data, } err_transfer: - bs->RestoreTPL ( saved_tpl ); + efi_restore_tpl ( &tpl ); return EFIRC ( rc ); } @@ -633,12 +679,11 @@ static EFI_STATUS EFIAPI efi_usb_sync_interrupt_transfer ( EFI_USB_IO_PROTOCOL *usbio, UINT8 endpoint, VOID *data, UINTN *len, UINTN timeout, UINT32 *status ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct efi_usb_interface *usbintf = container_of ( usbio, struct efi_usb_interface, usbio ); struct efi_usb_device *usbdev = usbintf->usbdev; size_t actual = *len; - EFI_TPL saved_tpl; + struct efi_saved_tpl tpl; int rc; DBGC2 ( usbdev, "USBDEV %s sync intr %s %p+%zx %dms\n", usbintf->name, @@ -646,7 +691,7 @@ efi_usb_sync_interrupt_transfer ( EFI_USB_IO_PROTOCOL *usbio, UINT8 endpoint, ( ( size_t ) *len ), ( ( unsigned int ) timeout ) ); /* Raise TPL */ - saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + efi_raise_tpl ( &tpl ); /* Clear status */ *status = 0; @@ -661,7 +706,7 @@ efi_usb_sync_interrupt_transfer ( EFI_USB_IO_PROTOCOL *usbio, UINT8 endpoint, } err_transfer: - bs->RestoreTPL ( saved_tpl ); + efi_restore_tpl ( &tpl ); return EFIRC ( rc ); } @@ -682,11 +727,10 @@ efi_usb_async_interrupt_transfer ( EFI_USB_IO_PROTOCOL *usbio, UINT8 endpoint, BOOLEAN start, UINTN interval, UINTN len, EFI_ASYNC_USB_TRANSFER_CALLBACK callback, VOID *context ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct efi_usb_interface *usbintf = container_of ( usbio, struct efi_usb_interface, usbio ); struct efi_usb_device *usbdev = usbintf->usbdev; - EFI_TPL saved_tpl; + struct efi_saved_tpl tpl; int rc; DBGC2 ( usbdev, "USBDEV %s async intr %s len %#zx int %d %p/%p\n", @@ -696,7 +740,7 @@ efi_usb_async_interrupt_transfer ( EFI_USB_IO_PROTOCOL *usbio, UINT8 endpoint, callback, context ); /* Raise TPL */ - saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + efi_raise_tpl ( &tpl ); /* Start/stop transfer as applicable */ if ( start ) { @@ -718,7 +762,7 @@ efi_usb_async_interrupt_transfer ( EFI_USB_IO_PROTOCOL *usbio, UINT8 endpoint, } err_start: - bs->RestoreTPL ( saved_tpl ); + efi_restore_tpl ( &tpl ); return EFIRC ( rc ); } @@ -796,7 +840,7 @@ efi_usb_get_device_descriptor ( EFI_USB_IO_PROTOCOL *usbio, DBGC2 ( usbdev, "USBDEV %s get device descriptor\n", usbintf->name ); /* Copy cached device descriptor */ - memcpy ( efidesc, &usbdev->usb->device, sizeof ( *efidesc ) ); + memcpy ( efidesc, &usbdev->func->usb->device, sizeof ( *efidesc ) ); return 0; } @@ -914,9 +958,9 @@ efi_usb_get_string_descriptor ( EFI_USB_IO_PROTOCOL *usbio, UINT16 language, container_of ( usbio, struct efi_usb_interface, usbio ); struct efi_usb_device *usbdev = usbintf->usbdev; struct usb_descriptor_header header; + struct efi_saved_tpl tpl; VOID 
*buffer; size_t len; - EFI_TPL saved_tpl; EFI_STATUS efirc; int rc; @@ -924,11 +968,12 @@ efi_usb_get_string_descriptor ( EFI_USB_IO_PROTOCOL *usbio, UINT16 language, usbintf->name, language, index ); /* Raise TPL */ - saved_tpl = bs->RaiseTPL ( TPL_CALLBACK ); + efi_raise_tpl ( &tpl ); /* Read descriptor header */ - if ( ( rc = usb_get_descriptor ( usbdev->usb, 0, USB_STRING_DESCRIPTOR, - index, language, &header, + if ( ( rc = usb_get_descriptor ( usbdev->func->usb, 0, + USB_STRING_DESCRIPTOR, index, + language, &header, sizeof ( header ) ) ) != 0 ) { DBGC ( usbdev, "USBDEV %s could not get string %d:%d " "descriptor header: %s\n", usbintf->name, language, @@ -936,6 +981,12 @@ efi_usb_get_string_descriptor ( EFI_USB_IO_PROTOCOL *usbio, UINT16 language, goto err_get_header; } len = header.len; + if ( len < sizeof ( header ) ) { + DBGC ( usbdev, "USBDEV %s underlength string %d:%d\n", + usbintf->name, language, index ); + rc = -EINVAL; + goto err_len; + } /* Allocate buffer */ if ( ( efirc = bs->AllocatePool ( EfiBootServicesData, len, @@ -945,9 +996,9 @@ efi_usb_get_string_descriptor ( EFI_USB_IO_PROTOCOL *usbio, UINT16 language, } /* Read whole descriptor */ - if ( ( rc = usb_get_descriptor ( usbdev->usb, 0, USB_STRING_DESCRIPTOR, - index, language, buffer, - len ) ) != 0 ) { + if ( ( rc = usb_get_descriptor ( usbdev->func->usb, 0, + USB_STRING_DESCRIPTOR, index, + language, buffer, len ) ) != 0 ) { DBGC ( usbdev, "USBDEV %s could not get string %d:%d " "descriptor: %s\n", usbintf->name, language, index, strerror ( rc ) ); @@ -960,7 +1011,7 @@ efi_usb_get_string_descriptor ( EFI_USB_IO_PROTOCOL *usbio, UINT16 language, memset ( ( buffer + len - sizeof ( header ) ), 0, sizeof ( **string ) ); /* Restore TPL */ - bs->RestoreTPL ( saved_tpl ); + efi_restore_tpl ( &tpl ); /* Return allocated string */ *string = buffer; @@ -969,8 +1020,9 @@ efi_usb_get_string_descriptor ( EFI_USB_IO_PROTOCOL *usbio, UINT16 language, err_get_descriptor: bs->FreePool ( buffer ); err_alloc: + err_len: err_get_header: - bs->RestoreTPL ( saved_tpl ); + efi_restore_tpl ( &tpl ); return EFIRC ( rc ); } @@ -992,9 +1044,8 @@ efi_usb_get_supported_languages ( EFI_USB_IO_PROTOCOL *usbio, DBGC2 ( usbdev, "USBDEV %s get supported languages\n", usbintf->name ); /* Return cached supported languages */ - *languages = ( ( ( void * ) usbdev->languages ) + - sizeof ( *(usbdev->languages) ) ); - *len = usbdev->languages->len; + *languages = usbdev->lang; + *len = usbdev->lang_len; return 0; } @@ -1056,25 +1107,14 @@ static EFI_USB_IO_PROTOCOL efi_usb_io_protocol = { static int efi_usb_install ( struct efi_usb_device *usbdev, unsigned int interface ) { EFI_BOOT_SERVICES *bs = efi_systab->BootServices; - struct efi_device *efidev = usbdev->efidev; + struct usb_function *func = usbdev->func; struct efi_usb_interface *usbintf; - struct usb_device *usb; - EFI_DEVICE_PATH_PROTOCOL *path_end; - USB_DEVICE_PATH *usbpath; - unsigned int path_count; - size_t path_prefix_len; - size_t path_len; + int leak = 0; EFI_STATUS efirc; int rc; - /* Calculate device path length */ - path_count = ( usb_depth ( usbdev->usb ) + 1 ); - path_prefix_len = efi_devpath_len ( efidev->path ); - path_len = ( path_prefix_len + ( path_count * sizeof ( *usbpath ) ) + - sizeof ( *path_end ) ); - /* Allocate and initialise structure */ - usbintf = zalloc ( sizeof ( *usbintf ) + path_len ); + usbintf = zalloc ( sizeof ( *usbintf ) ); if ( ! 
usbintf ) { rc = -ENOMEM; goto err_alloc; @@ -1085,22 +1125,12 @@ static int efi_usb_install ( struct efi_usb_device *usbdev, usbintf->interface = interface; memcpy ( &usbintf->usbio, &efi_usb_io_protocol, sizeof ( usbintf->usbio ) ); - usbintf->path = ( ( ( void * ) usbintf ) + sizeof ( *usbintf ) ); /* Construct device path */ - memcpy ( usbintf->path, efidev->path, path_prefix_len ); - path_end = ( ( ( void * ) usbintf->path ) + path_len - - sizeof ( *path_end ) ); - path_end->Type = END_DEVICE_PATH_TYPE; - path_end->SubType = END_ENTIRE_DEVICE_PATH_SUBTYPE; - path_end->Length[0] = sizeof ( *path_end ); - usbpath = ( ( ( void * ) path_end ) - sizeof ( *usbpath ) ); - usbpath->InterfaceNumber = interface; - for ( usb = usbdev->usb ; usb ; usbpath--, usb = usb->port->hub->usb ) { - usbpath->Header.Type = MESSAGING_DEVICE_PATH; - usbpath->Header.SubType = MSG_USB_DP; - usbpath->Header.Length[0] = sizeof ( *usbpath ); - usbpath->ParentPortNumber = usb->port->address; + usbintf->path = efi_usb_path ( func ); + if ( ! usbintf->path ) { + rc = -ENODEV; + goto err_path; } /* Add to list of interfaces */ @@ -1122,16 +1152,30 @@ static int efi_usb_install ( struct efi_usb_device *usbdev, usbintf->name, efi_handle_name ( usbintf->handle ) ); return 0; - efi_usb_close_all ( usbintf ); - bs->UninstallMultipleProtocolInterfaces ( + if ( ( efirc = bs->UninstallMultipleProtocolInterfaces ( usbintf->handle, &efi_usb_io_protocol_guid, &usbintf->usbio, &efi_device_path_protocol_guid, usbintf->path, - NULL ); + NULL ) ) != 0 ) { + DBGC ( usbdev, "USBDEV %s could not uninstall: %s\n", + usbintf->name, strerror ( -EEFI ( efirc ) ) ); + leak = 1; + } + efi_nullify_usbio ( &usbintf->usbio ); err_install_protocol: + efi_usb_close_all ( usbintf ); + efi_usb_free_all ( usbintf ); list_del ( &usbintf->list ); - free ( usbintf ); + if ( ! leak ) + free ( usbintf->path ); + err_path: + if ( ! leak ) + free ( usbintf ); err_alloc: + if ( leak ) { + DBGC ( usbdev, "USBDEV %s nullified and leaked\n", + usbintf->name ); + } return rc; } @@ -1142,22 +1186,53 @@ static int efi_usb_install ( struct efi_usb_device *usbdev, */ static void efi_usb_uninstall ( struct efi_usb_interface *usbintf ) { EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + struct efi_usb_device *usbdev = usbintf->usbdev; + int leak = efi_shutdown_in_progress; + EFI_STATUS efirc; - /* Close all endpoints */ - efi_usb_close_all ( usbintf ); + DBGC ( usbdev, "USBDEV %s uninstalling %s\n", + usbintf->name, efi_handle_name ( usbintf->handle ) ); + + /* Disconnect controllers. This should not be necessary, but + * seems to be required on some platforms to avoid failures + * when uninstalling protocols. + */ + if ( ! efi_shutdown_in_progress ) + bs->DisconnectController ( usbintf->handle, NULL, NULL ); /* Uninstall protocols */ - bs->UninstallMultipleProtocolInterfaces ( + if ( ( ! efi_shutdown_in_progress ) && + ( ( efirc = bs->UninstallMultipleProtocolInterfaces ( usbintf->handle, &efi_usb_io_protocol_guid, &usbintf->usbio, &efi_device_path_protocol_guid, usbintf->path, - NULL ); + NULL ) ) != 0 ) ) { + DBGC ( usbdev, "USBDEV %s could not uninstall: %s\n", + usbintf->name, strerror ( -EEFI ( efirc ) ) ); + leak = 1; + } + efi_nullify_usbio ( &usbintf->usbio ); + + /* Close and free all endpoints */ + efi_usb_close_all ( usbintf ); + efi_usb_free_all ( usbintf ); /* Remove from list of interfaces */ list_del ( &usbintf->list ); + /* Free device path */ + if ( ! leak ) + free ( usbintf->path ); + /* Free interface */ - free ( usbintf ); + if ( ! 
leak ) + free ( usbintf ); + + /* Report leakage, if applicable */ + if ( leak && ( ! efi_shutdown_in_progress ) ) { + DBGC ( usbdev, "USBDEV %s nullified and leaked\n", + usbintf->name ); + } } /** @@ -1189,19 +1264,13 @@ static int efi_usb_probe ( struct usb_function *func, struct usb_device *usb = func->usb; struct efi_usb_device *usbdev; struct efi_usb_interface *usbintf; - struct efi_device *efidev; struct usb_descriptor_header header; + struct usb_descriptor_header *lang; size_t config_len; + size_t lang_len; unsigned int i; int rc; - /* Find parent EFI device */ - efidev = efidev_parent ( &func->dev ); - if ( ! efidev ) { - rc = -ENOTTY; - goto err_no_efidev; - } - /* Get configuration length */ config_len = le16_to_cpu ( config->len ); @@ -1211,27 +1280,30 @@ static int efi_usb_probe ( struct usb_function *func, /* Assume no strings are present */ header.len = 0; } + lang_len = ( ( header.len >= sizeof ( header ) ) ? + ( header.len - sizeof ( header ) ) : 0 ); /* Allocate and initialise structure */ - usbdev = zalloc ( sizeof ( *usbdev ) + config_len + header.len ); + usbdev = zalloc ( sizeof ( *usbdev ) + config_len + + sizeof ( *lang ) + lang_len ); if ( ! usbdev ) { rc = -ENOMEM; goto err_alloc; } usb_func_set_drvdata ( func, usbdev ); usbdev->name = func->name; - usbdev->usb = usb; - usbdev->efidev = efidev; + usbdev->func = func; usbdev->config = ( ( ( void * ) usbdev ) + sizeof ( *usbdev ) ); memcpy ( usbdev->config, config, config_len ); - usbdev->languages = ( ( ( void * ) usbdev->config ) + config_len ); + lang = ( ( ( void * ) usbdev->config ) + config_len ); + usbdev->lang = ( ( ( void * ) lang ) + sizeof ( *lang ) ); + usbdev->lang_len = lang_len; INIT_LIST_HEAD ( &usbdev->interfaces ); - /* Get supported languages descriptor */ - if ( header.len && - ( rc = usb_get_descriptor ( usb, 0, USB_STRING_DESCRIPTOR, 0, 0, - usbdev->languages, - header.len ) ) != 0 ) { + /* Get supported languages descriptor, if applicable */ + if ( lang_len && + ( ( rc = usb_get_descriptor ( usb, 0, USB_STRING_DESCRIPTOR, + 0, 0, lang, header.len ) ) != 0 ) ) { DBGC ( usbdev, "USBDEV %s could not get supported languages: " "%s\n", usbdev->name, strerror ( rc ) ); goto err_get_languages; @@ -1256,7 +1328,6 @@ static int efi_usb_probe ( struct usb_function *func, err_get_languages: free ( usbdev ); err_alloc: - err_no_efidev: return rc; } @@ -1286,7 +1357,7 @@ static struct usb_device_id efi_usb_ids[] = { }; /** USB I/O protocol driver */ -struct usb_driver usbio_driver __usb_driver = { +struct usb_driver usbio_driver __usb_fallback_driver = { .ids = efi_usb_ids, .id_count = ( sizeof ( efi_usb_ids ) / sizeof ( efi_usb_ids[0] ) ), .class = USB_CLASS_ID ( USB_ANY_ID, USB_ANY_ID, USB_ANY_ID ), diff --git a/src/interface/efi/efi_utils.c b/src/interface/efi/efi_utils.c index 4dc75414c..8e660e9d7 100644 --- a/src/interface/efi/efi_utils.c +++ b/src/interface/efi/efi_utils.c @@ -32,36 +32,6 @@ FILE_LICENCE ( GPL2_OR_LATER ); * */ -/** - * Find end of device path - * - * @v path Path to device - * @ret path_end End of device path - */ -EFI_DEVICE_PATH_PROTOCOL * efi_devpath_end ( EFI_DEVICE_PATH_PROTOCOL *path ) { - - while ( path->Type != END_DEVICE_PATH_TYPE ) { - path = ( ( ( void * ) path ) + - /* There's this amazing new-fangled thing known as - * a UINT16, but who wants to use one of those? 
*/ - ( ( path->Length[1] << 8 ) | path->Length[0] ) ); - } - - return path; -} - -/** - * Find length of device path (excluding terminator) - * - * @v path Path to device - * @ret path_len Length of device path - */ -size_t efi_devpath_len ( EFI_DEVICE_PATH_PROTOCOL *path ) { - EFI_DEVICE_PATH_PROTOCOL *end = efi_devpath_end ( path ); - - return ( ( ( void * ) end ) - ( ( void * ) path ) ); -} - /** * Locate parent device supporting a given protocol * @@ -175,7 +145,7 @@ void efi_child_del ( EFI_HANDLE parent, EFI_HANDLE child ) { static int efi_pci_info ( EFI_HANDLE device, const char *prefix, struct device *dev ) { EFI_HANDLE pci_device; - struct pci_device pci; + struct efi_pci_device efipci; int rc; /* Find parent PCI device */ @@ -187,16 +157,16 @@ static int efi_pci_info ( EFI_HANDLE device, const char *prefix, } /* Get PCI device information */ - if ( ( rc = efipci_info ( pci_device, &pci ) ) != 0 ) { + if ( ( rc = efipci_info ( pci_device, &efipci ) ) != 0 ) { DBGC ( device, "EFIDEV %s could not get PCI information: %s\n", efi_handle_name ( device ), strerror ( rc ) ); return rc; } /* Populate device information */ - memcpy ( &dev->desc, &pci.dev.desc, sizeof ( dev->desc ) ); + memcpy ( &dev->desc, &efipci.pci.dev.desc, sizeof ( dev->desc ) ); snprintf ( dev->name, sizeof ( dev->name ), "%s-%s", - prefix, pci.dev.name ); + prefix, efipci.pci.dev.name ); return 0; } diff --git a/src/interface/efi/efi_veto.c b/src/interface/efi/efi_veto.c new file mode 100644 index 000000000..b616539d3 --- /dev/null +++ b/src/interface/efi/efi_veto.c @@ -0,0 +1,611 @@ +/* + * Copyright (C) 2019 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** @file + * + * EFI driver vetoes + * + */ + +/** A driver veto */ +struct efi_veto { + /** Veto name (for debugging) */ + const char *name; + /** + * Check if driver is vetoed + * + * @v binding Driver binding protocol + * @v loaded Loaded image protocol + * @v wtf Component name protocol, if present + * @v manufacturer Manufacturer name, if present + * @v name Driver name (in "eng" language), if present + * @ret vetoed Driver is to be vetoed + */ + int ( * veto ) ( EFI_DRIVER_BINDING_PROTOCOL *binding, + EFI_LOADED_IMAGE_PROTOCOL *loaded, + EFI_COMPONENT_NAME_PROTOCOL *wtf, + const char *manufacturer, const CHAR16 *name ); +}; + +/** + * Unload an EFI driver + * + * @v driver Driver binding handle + * @ret rc Return status code + */ +static int efi_veto_unload ( EFI_HANDLE driver ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + EFI_STATUS efirc; + int rc; + + /* Unload the driver */ + if ( ( efirc = bs->UnloadImage ( driver ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( driver, "EFIVETO %s could not unload: %s\n", + efi_handle_name ( driver ), strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Disconnect an EFI driver from all handles + * + * @v driver Driver binding handle + * @ret rc Return status code + */ +static int efi_veto_disconnect ( EFI_HANDLE driver ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + EFI_HANDLE *handles; + EFI_HANDLE handle; + UINTN count; + unsigned int i; + EFI_STATUS efirc; + int rc; + + /* Enumerate all handles */ + if ( ( efirc = bs->LocateHandleBuffer ( AllHandles, NULL, NULL, + &count, &handles ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( driver, "EFIVETO %s could not enumerate handles: %s\n", + efi_handle_name ( driver ), strerror ( rc ) ); + goto err_list; + } + + /* Disconnect driver from all handles, in reverse order */ + for ( i = 0 ; i < count ; i++ ) { + handle = handles[ count - i - 1 ]; + efirc = bs->DisconnectController ( handle, driver, NULL ); + if ( ( efirc != 0 ) && ( efirc != EFI_NOT_FOUND ) ) { + rc = -EEFI ( efirc ); + DBGC ( driver, "EFIVETO %s could not disconnect", + efi_handle_name ( driver ) ); + DBGC ( driver, " %s: %s\n", + efi_handle_name ( handle ), strerror ( rc ) ); + goto err_disconnect; + } + } + + /* Success */ + rc = 0; + DBGC2 ( driver, "EFIVETO %s disconnected all handles\n", + efi_handle_name ( driver ) ); + + err_disconnect: + bs->FreePool ( handles ); + err_list: + return rc; +} + +/** + * Uninstall an EFI driver binding protocol + * + * @v driver Driver binding handle + * @ret rc Return status code + */ +static int efi_veto_uninstall ( EFI_HANDLE driver ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + union { + EFI_DRIVER_BINDING_PROTOCOL *binding; + void *interface; + } binding; + EFI_STATUS efirc; + int rc; + + /* Open driver binding protocol */ + if ( ( efirc = bs->OpenProtocol ( + driver, &efi_driver_binding_protocol_guid, + &binding.interface, efi_image_handle, driver, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( driver, "EFIVETO %s could not open driver binding " + "protocol: %s\n", efi_handle_name ( driver ), + strerror ( rc ) ); + return rc; + } + + /* Close driver binding protocol */ + bs->CloseProtocol ( driver, &efi_driver_binding_protocol_guid, + efi_image_handle, driver ); + + /* Uninstall driver binding protocol */ + if ( ( efirc = bs->UninstallMultipleProtocolInterfaces 
( + driver, &efi_driver_binding_protocol_guid, + binding.binding, NULL ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( driver, "EFIVETO %s could not uninstall driver " + "binding protocol: %s\n", + efi_handle_name ( driver ), strerror ( rc ) ); + return rc; + } + + DBGC2 ( driver, "EFIVETO %s uninstalled driver binding protocol\n", + efi_handle_name ( driver ) ); + return 0; +} + +/** + * Close protocol on handle potentially opened by an EFI driver + * + * @v driver Driver binding handle + * @v handle Potentially opened handle + * @v protocol Opened protocol + * @ret rc Return status code + */ +static int efi_veto_close_protocol ( EFI_HANDLE driver, EFI_HANDLE handle, + EFI_GUID *protocol ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + EFI_OPEN_PROTOCOL_INFORMATION_ENTRY *openers; + EFI_OPEN_PROTOCOL_INFORMATION_ENTRY *opener; + EFI_HANDLE controller; + UINTN count; + unsigned int i; + EFI_STATUS efirc; + int rc; + + /* Retrieve list of openers */ + if ( ( efirc = bs->OpenProtocolInformation ( handle, protocol, &openers, + &count ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( driver, "EFIVETO %s could not retrieve openers", + efi_handle_name ( driver ) ); + DBGC ( driver, " of %s %s: %s", efi_handle_name ( handle ), + efi_guid_ntoa ( protocol ), strerror ( rc ) ); + goto err_list; + } + + /* Close anything opened by this driver */ + for ( i = 0 ; i < count ; i++ ) { + opener = &openers[i]; + if ( opener->AgentHandle != driver ) + continue; + controller = opener->ControllerHandle; + DBGC_EFI_OPENER ( driver, handle, protocol, opener ); + if ( ( efirc = bs->CloseProtocol ( handle, protocol, driver, + controller ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( driver, "EFIVETO %s could not close stray open", + efi_handle_name ( driver ) ); + DBGC ( driver, " of %s: %s\n", + efi_handle_name ( handle ), strerror ( rc ) ); + goto err_close; + } + } + + /* Success */ + rc = 0; + + err_close: + bs->FreePool ( openers ); + err_list: + return rc; +} + +/** + * Close handle potentially opened by an EFI driver + * + * @v driver Driver binding handle + * @v handle Potentially opened handle + * @ret rc Return status code + */ +static int efi_veto_close_handle ( EFI_HANDLE driver, EFI_HANDLE handle ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + EFI_GUID **protocols; + UINTN count; + unsigned int i; + EFI_STATUS efirc; + int rc; + + /* Retrieve list of protocols */ + if ( ( efirc = bs->ProtocolsPerHandle ( handle, &protocols, + &count ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( driver, "EFIVETO %s could not retrieve protocols", + efi_handle_name ( driver ) ); + DBGC ( driver, " for %s: %s\n", + efi_handle_name ( handle ), strerror ( rc ) ); + goto err_list; + } + + /* Close each protocol */ + for ( i = 0 ; i < count ; i++ ) { + if ( ( rc = efi_veto_close_protocol ( driver, handle, + protocols[i] ) ) != 0 ) + goto err_close; + } + + /* Success */ + rc = 0; + + err_close: + bs->FreePool ( protocols ); + err_list: + return rc; +} + +/** + * Close all remaining handles opened by an EFI driver + * + * @v driver Driver binding handle + * @ret rc Return status code + */ +static int efi_veto_close ( EFI_HANDLE driver ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + EFI_HANDLE *handles; + UINTN count; + unsigned int i; + EFI_STATUS efirc; + int rc; + + /* Enumerate all handles */ + if ( ( efirc = bs->LocateHandleBuffer ( AllHandles, NULL, NULL, + &count, &handles ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( driver, "EFIVETO %s could not enumerate handles: %s\n", + efi_handle_name ( 
driver ), strerror ( rc ) ); + goto err_list; + } + + /* Close each handle */ + for ( i = 0 ; i < count ; i++ ) { + if ( ( rc = efi_veto_close_handle ( driver, + handles[i] ) ) != 0 ) + goto err_close; + } + + /* Success */ + rc = 0; + DBGC2 ( driver, "EFIVETO %s closed all remaining handles\n", + efi_handle_name ( driver ) ); + + err_close: + bs->FreePool ( handles ); + err_list: + return rc; +} + +/** + * Terminate an EFI driver with extreme prejudice + * + * @v driver Driver binding handle + * @ret rc Return status code + */ +static int efi_veto_destroy ( EFI_HANDLE driver ) { + int rc; + + /* Disconnect driver from all handles */ + if ( ( rc = efi_veto_disconnect ( driver ) ) != 0 ) + return rc; + + /* Uninstall driver binding protocol */ + if ( ( rc = efi_veto_uninstall ( driver ) ) != 0 ) + return rc; + + /* Close any remaining opened handles */ + if ( ( rc = efi_veto_close ( driver ) ) != 0 ) + return rc; + + DBGC ( driver, "EFIVETO %s forcibly removed\n", + efi_handle_name ( driver ) ); + return 0; +} + +/** + * Veto an EFI driver + * + * @v driver Driver binding handle + * @ret rc Return status code + */ +static int efi_veto_driver ( EFI_HANDLE driver ) { + int rc; + + /* Try gracefully unloading the driver */ + if ( ( rc = efi_veto_unload ( driver ) ) == 0 ) + return 0; + + /* If that fails, use a hammer */ + if ( ( rc = efi_veto_destroy ( driver ) ) == 0 ) + return 0; + + return rc; +} + +/** + * Veto Ip4ConfigDxe driver on some platforms + * + * @v binding Driver binding protocol + * @v loaded Loaded image protocol + * @v wtf Component name protocol, if present + * @v manufacturer Manufacturer name, if present + * @v name Driver name, if present + * @ret vetoed Driver is to be vetoed + */ +static int +efi_veto_ip4config ( EFI_DRIVER_BINDING_PROTOCOL *binding __unused, + EFI_LOADED_IMAGE_PROTOCOL *loaded __unused, + EFI_COMPONENT_NAME_PROTOCOL *wtf __unused, + const char *manufacturer, const CHAR16 *name ) { + static const CHAR16 ip4cfg[] = L"IP4 CONFIG Network Service Driver"; + static const char *dell = "Dell Inc."; + static const char *itautec = "Itautec S.A."; + + /* Check manufacturer and driver name */ + if ( ! manufacturer ) + return 0; + if ( ! name ) + return 0; + if ( ( strcmp ( manufacturer, dell ) != 0 ) && + ( strcmp ( manufacturer, itautec ) != 0 ) ) + return 0; + if ( memcmp ( name, ip4cfg, sizeof ( ip4cfg ) ) != 0 ) + return 0; + + return 1; +} + +/** + * Veto HP XhciDxe driver + * + * @v binding Driver binding protocol + * @v loaded Loaded image protocol + * @v wtf Component name protocol, if present + * @v manufacturer Manufacturer name, if present + * @v name Driver name, if present + * @ret vetoed Driver is to be vetoed + */ +static int +efi_veto_hp_xhci ( EFI_DRIVER_BINDING_PROTOCOL *binding __unused, + EFI_LOADED_IMAGE_PROTOCOL *loaded __unused, + EFI_COMPONENT_NAME_PROTOCOL *wtf __unused, + const char *manufacturer, const CHAR16 *name ) { + static const CHAR16 xhci[] = L"Usb Xhci Driver"; + static const char *hp = "HP"; + struct pci_driver *driver; + + /* Check manufacturer and driver name */ + if ( ! manufacturer ) + return 0; + if ( ! 
name ) + return 0; + if ( strcmp ( manufacturer, hp ) != 0 ) + return 0; + if ( memcmp ( name, xhci, sizeof ( xhci ) ) != 0 ) + return 0; + + /* Veto driver only if we have our own xHCI driver */ + for_each_table_entry ( driver, PCI_DRIVERS ) { + if ( driver->class.class == + PCI_CLASS ( PCI_CLASS_SERIAL, PCI_CLASS_SERIAL_USB, + PCI_CLASS_SERIAL_USB_XHCI ) ) { + return 1; + } + } + + return 0; +} + +/** Driver vetoes */ +static struct efi_veto efi_vetoes[] = { + { + .name = "Ip4Config", + .veto = efi_veto_ip4config, + }, + { + .name = "HP Xhci", + .veto = efi_veto_hp_xhci, + }, +}; + +/** + * Find driver veto, if any + * + * @v driver Driver binding handle + * @v manufacturer Manufacturer name, if present + * @ret veto Driver veto, or NULL + * @ret rc Return status code + */ +static int efi_veto_find ( EFI_HANDLE driver, const char *manufacturer, + struct efi_veto **veto ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + union { + EFI_DRIVER_BINDING_PROTOCOL *binding; + void *interface; + } binding; + union { + EFI_LOADED_IMAGE_PROTOCOL *loaded; + void *interface; + } loaded; + union { + EFI_COMPONENT_NAME_PROTOCOL *wtf; + void *interface; + } wtf; + CHAR16 *name; + unsigned int i; + EFI_HANDLE image; + EFI_STATUS efirc; + int rc; + + DBGC2 ( &efi_vetoes, "EFIVETO checking %s\n", + efi_handle_name ( driver ) ); + + /* Mark as not vetoed */ + *veto = NULL; + + /* Open driver binding protocol */ + if ( ( efirc = bs->OpenProtocol ( + driver, &efi_driver_binding_protocol_guid, + &binding.interface, efi_image_handle, driver, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( driver, "EFIVETO %s could not open driver binding " + "protocol: %s\n", efi_handle_name ( driver ), + strerror ( rc ) ); + goto err_binding; + } + image = binding.binding->ImageHandle; + + /* Open loaded image protocol */ + if ( ( efirc = bs->OpenProtocol ( + image, &efi_loaded_image_protocol_guid, + &loaded.interface, efi_image_handle, image, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( driver, "EFIVETO %s could not open", + efi_handle_name ( driver ) ); + DBGC ( driver, " %s loaded image protocol: %s\n", + efi_handle_name ( image ), strerror ( rc ) ); + goto err_loaded; + } + + /* Open component name protocol, if present*/ + if ( ( efirc = bs->OpenProtocol ( + driver, &efi_component_name_protocol_guid, + &wtf.interface, efi_image_handle, driver, + EFI_OPEN_PROTOCOL_GET_PROTOCOL ) ) != 0 ) { + /* Ignore failure; is not required to be present */ + wtf.interface = NULL; + } + + /* Get driver name, if available */ + if ( wtf.wtf && + ( ( efirc = wtf.wtf->GetDriverName ( wtf.wtf, "eng", + &name ) == 0 ) ) ) { + /* Driver has a name */ + } else { + /* Ignore failure; name is not required to be present */ + name = NULL; + } + + /* Check vetoes */ + for ( i = 0 ; i < ( sizeof ( efi_vetoes ) / + sizeof ( efi_vetoes[0] ) ) ; i++ ) { + if ( efi_vetoes[i].veto ( binding.binding, loaded.loaded, + wtf.wtf, manufacturer, name ) ) { + *veto = &efi_vetoes[i]; + break; + } + } + + /* Success */ + rc = 0; + + /* Close protocols */ + if ( wtf.wtf ) { + bs->CloseProtocol ( driver, &efi_component_name_protocol_guid, + efi_image_handle, driver ); + } + bs->CloseProtocol ( image, &efi_loaded_image_protocol_guid, + efi_image_handle, image ); + err_loaded: + bs->CloseProtocol ( driver, &efi_driver_binding_protocol_guid, + efi_image_handle, driver ); + err_binding: + return rc; +} + +/** + * Remove any vetoed drivers + * + */ +void efi_veto ( void ) { + EFI_BOOT_SERVICES *bs = 
efi_systab->BootServices; + struct efi_veto *veto; + EFI_HANDLE *drivers; + EFI_HANDLE driver; + UINTN num_drivers; + unsigned int i; + char *manufacturer; + EFI_STATUS efirc; + int rc; + + /* Locate all driver binding protocol handles */ + if ( ( efirc = bs->LocateHandleBuffer ( + ByProtocol, &efi_driver_binding_protocol_guid, + NULL, &num_drivers, &drivers ) ) != 0 ) { + rc = -EEFI ( efirc ); + DBGC ( &efi_vetoes, "EFIVETO could not list all drivers: " + "%s\n", strerror ( rc ) ); + return; + } + + /* Get manufacturer name */ + fetch_string_setting_copy ( NULL, &manufacturer_setting, + &manufacturer ); + + /* Unload any vetoed drivers */ + for ( i = 0 ; i < num_drivers ; i++ ) { + driver = drivers[i]; + if ( ( rc = efi_veto_find ( driver, manufacturer, + &veto ) ) != 0 ) { + DBGC ( driver, "EFIVETO %s could not determine " + "vetoing: %s\n", + efi_handle_name ( driver ), strerror ( rc ) ); + continue; + } + if ( ! veto ) + continue; + DBGC ( driver, "EFIVETO %s is vetoed (%s)\n", + efi_handle_name ( driver ), veto->name ); + if ( ( rc = efi_veto_driver ( driver ) ) != 0 ) { + DBGC ( driver, "EFIVETO %s could not veto: %s\n", + efi_handle_name ( driver ), strerror ( rc ) ); + } + } + + /* Free manufacturer name */ + free ( manufacturer ); + + /* Free handle list */ + bs->FreePool ( drivers ); +} diff --git a/src/interface/efi/efi_wrap.c b/src/interface/efi/efi_wrap.c index c0c40eec6..5c02a7ee1 100644 --- a/src/interface/efi/efi_wrap.c +++ b/src/interface/efi/efi_wrap.c @@ -37,14 +37,8 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include -/** EFI system table wrapper */ -static EFI_SYSTEM_TABLE efi_systab_wrapper; - -/** EFI boot services table wrapper */ -static EFI_BOOT_SERVICES efi_bs_wrapper; - /** Colour for debug messages */ -#define colour &efi_systab_wrapper +#define colour &efi_systab /** * Convert EFI status code to text @@ -111,6 +105,358 @@ static const char * efi_boolean ( BOOLEAN boolean ) { return ( boolean ? 
"TRUE" : "FALSE" ); } +/** + * Convert EFI TPL to text + * + * @v tpl Task priority level + * @ret text Task priority level as text + */ +static const char * efi_tpl ( EFI_TPL tpl ) { + static char buf[ 19 /* "0xXXXXXXXXXXXXXXXX" + NUL */ ]; + + switch ( tpl ) { + case TPL_APPLICATION: return "Application"; + case TPL_CALLBACK: return "Callback"; + case TPL_NOTIFY: return "Notify"; + case TPL_HIGH_LEVEL: return "HighLevel"; + default: + snprintf ( buf, sizeof ( buf ), "%#lx", + ( unsigned long ) tpl ); + return buf; + } +} + +/** + * Convert EFI allocation type to text + * + * @v type Allocation type + * @ret text Allocation type as text + */ +static const char * efi_allocate_type ( EFI_ALLOCATE_TYPE type ) { + static char buf[ 11 /* "0xXXXXXXXX" + NUL */ ]; + + switch ( type ) { + case AllocateAnyPages: return "AnyPages"; + case AllocateMaxAddress: return "MaxAddress"; + case AllocateAddress: return "Address"; + default: + snprintf ( buf, sizeof ( buf ), "%#x", type ); + return buf; + } +} + +/** + * Convert EFI memory type to text + * + * @v type Memory type + * @ret text Memory type as text + */ +static const char * efi_memory_type ( EFI_MEMORY_TYPE type ) { + static char buf[ 11 /* "0xXXXXXXXX" + NUL */ ]; + + switch ( type ) { + case EfiReservedMemoryType: return "Reserved"; + case EfiLoaderCode: return "LoaderCode"; + case EfiLoaderData: return "LoaderData"; + case EfiBootServicesCode: return "BootCode"; + case EfiBootServicesData: return "BootData"; + case EfiRuntimeServicesCode: return "RuntimeCode"; + case EfiRuntimeServicesData: return "RuntimeData"; + case EfiConventionalMemory: return "Conventional"; + case EfiUnusableMemory: return "Unusable"; + case EfiACPIReclaimMemory: return "ACPIReclaim"; + case EfiACPIMemoryNVS: return "ACPINVS"; + case EfiMemoryMappedIO: return "MMIO"; + case EfiMemoryMappedIOPortSpace:return "PIO"; + case EfiPalCode: return "PalCode"; + case EfiPersistentMemory: return "Persistent"; + default: + snprintf ( buf, sizeof ( buf ), "%#x", type ); + return buf; + } +} + +/** + * Convert EFI timer delay type to text + * + * @v type Timer delay type + * @ret text Timer delay type as text + */ +static const char * efi_timer_delay ( EFI_TIMER_DELAY type ) { + static char buf[ 11 /* "0xXXXXXXXX" + NUL */ ]; + + switch ( type ) { + case TimerCancel: return "Cancel"; + case TimerPeriodic: return "Periodic"; + case TimerRelative: return "Relative"; + default: + snprintf ( buf, sizeof ( buf ), "%#x", type ); + return buf; + } +} + +/** + * Wrap RaiseTPL() + * + */ +static EFI_TPL EFIAPI +efi_raise_tpl_wrapper ( EFI_TPL new_tpl ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_TPL old_tpl; + + DBGCP ( colour, "RaiseTPL ( %s ) ", efi_tpl ( new_tpl ) ); + old_tpl = bs->RaiseTPL ( new_tpl ); + DBGCP ( colour, "= %s -> %p\n", efi_tpl ( old_tpl ), retaddr ); + return old_tpl; +} + +/** + * Wrap RestoreTPL() + * + */ +static VOID EFIAPI +efi_restore_tpl_wrapper ( EFI_TPL old_tpl ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + + DBGCP ( colour, "RestoreTPL ( %s ) ", efi_tpl ( old_tpl ) ); + bs->RestoreTPL ( old_tpl ); + DBGCP ( colour, "-> %p\n", retaddr ); +} + +/** + * Wrap AllocatePages() + * + */ +static EFI_STATUS EFIAPI +efi_allocate_pages_wrapper ( EFI_ALLOCATE_TYPE type, + EFI_MEMORY_TYPE memory_type, UINTN pages, + EFI_PHYSICAL_ADDRESS *memory ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 
); + EFI_STATUS efirc; + + DBGC2 ( colour, "AllocatePages ( %s, %s, %#llx, %#llx ) ", + efi_allocate_type ( type ), efi_memory_type ( memory_type ), + ( ( unsigned long long ) pages ), + ( ( unsigned long long ) *memory ) ); + efirc = bs->AllocatePages ( type, memory_type, pages, memory ); + DBGC2 ( colour, "= %s ( %#llx ) -> %p\n", efi_status ( efirc ), + ( ( unsigned long long ) *memory ), retaddr ); + return efirc; +} + +/** + * Wrap FreePages() + * + */ +static EFI_STATUS EFIAPI +efi_free_pages_wrapper ( EFI_PHYSICAL_ADDRESS memory, UINTN pages ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC2 ( colour, "FreePages ( %#llx, %#llx ) ", + ( ( unsigned long long ) memory ), + ( ( unsigned long long ) pages ) ); + efirc = bs->FreePages ( memory, pages ); + DBGC2 ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap GetMemoryMap() + * + */ +static EFI_STATUS EFIAPI +efi_get_memory_map_wrapper ( UINTN *memory_map_size, + EFI_MEMORY_DESCRIPTOR *memory_map, UINTN *map_key, + UINTN *descriptor_size, + UINT32 *descriptor_version ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_MEMORY_DESCRIPTOR *desc; + size_t remaining; + EFI_STATUS efirc; + + DBGC ( colour, "GetMemoryMap ( %#llx, %p ) ", + ( ( unsigned long long ) *memory_map_size ), memory_map ); + efirc = bs->GetMemoryMap ( memory_map_size, memory_map, map_key, + descriptor_size, descriptor_version ); + DBGC ( colour, "= %s ( %#llx, %#llx, %#llx, v%d", + efi_status ( efirc ), + ( ( unsigned long long ) *memory_map_size ), + ( ( unsigned long long ) *map_key ), + ( ( unsigned long long ) *descriptor_size ), + *descriptor_version ); + if ( DBG_EXTRA && ( efirc == 0 ) ) { + DBGC2 ( colour, ",\n" ); + for ( desc = memory_map, remaining = *memory_map_size ; + remaining >= *descriptor_size ; + desc = ( ( ( void * ) desc ) + *descriptor_size ), + remaining -= *descriptor_size ) { + DBGC2 ( colour, "%#016llx+%#08llx %#016llx " + "%s\n", desc->PhysicalStart, + ( desc->NumberOfPages * EFI_PAGE_SIZE ), + desc->Attribute, + efi_memory_type ( desc->Type ) ); + } + } else { + DBGC ( colour, " " ); + } + DBGC ( colour, ") -> %p\n", retaddr ); + return efirc; +} + +/** + * Wrap AllocatePool() + * + */ +static EFI_STATUS EFIAPI +efi_allocate_pool_wrapper ( EFI_MEMORY_TYPE pool_type, UINTN size, + VOID **buffer ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC2 ( colour, "AllocatePool ( %s, %#llx ) ", + efi_memory_type ( pool_type ), + ( ( unsigned long long ) size ) ); + efirc = bs->AllocatePool ( pool_type, size, buffer ); + DBGC2 ( colour, "= %s ( %p ) -> %p\n", + efi_status ( efirc ), *buffer, retaddr ); + return efirc; +} + +/** + * Wrap FreePool() + * + */ +static EFI_STATUS EFIAPI +efi_free_pool_wrapper ( VOID *buffer ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC2 ( colour, "FreePool ( %p ) ", buffer ); + efirc = bs->FreePool ( buffer ); + DBGC2 ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap CreateEvent() + * + */ +static EFI_STATUS EFIAPI +efi_create_event_wrapper ( UINT32 type, EFI_TPL notify_tpl, + EFI_EVENT_NOTIFY notify_function, + VOID *notify_context, EFI_EVENT *event ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = 
__builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "CreateEvent ( %#x, %s, %p, %p ) ", + type, efi_tpl ( notify_tpl ), notify_function, notify_context ); + efirc = bs->CreateEvent ( type, notify_tpl, notify_function, + notify_context, event ); + DBGC ( colour, "= %s ( %p ) -> %p\n", + efi_status ( efirc ), *event, retaddr ); + return efirc; +} + +/** + * Wrap SetTimer() + * + */ +static EFI_STATUS EFIAPI +efi_set_timer_wrapper ( EFI_EVENT event, EFI_TIMER_DELAY type, + UINT64 trigger_time ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "SetTimer ( %p, %s, %ld.%07ld00s ) ", + event, efi_timer_delay ( type ), + ( ( unsigned long ) ( trigger_time / 10000000 ) ), + ( ( unsigned long ) ( trigger_time % 10000000 ) ) ); + efirc = bs->SetTimer ( event, type, trigger_time ); + DBGC ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap WaitForEvent() + * + */ +static EFI_STATUS EFIAPI +efi_wait_for_event_wrapper ( UINTN number_of_events, EFI_EVENT *event, + UINTN *index ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + unsigned int i; + EFI_STATUS efirc; + + DBGC ( colour, "WaitForEvent (" ); + for ( i = 0 ; i < number_of_events ; i++ ) + DBGC ( colour, " %p", event[i] ); + DBGC ( colour, " ) " ); + efirc = bs->WaitForEvent ( number_of_events, event, index ); + DBGC ( colour, "= %s", efi_status ( efirc ) ); + if ( efirc == 0 ) + DBGC ( colour, " ( %p )", event[*index] ); + DBGC ( colour, " -> %p\n", retaddr ); + return efirc; +} + +/** + * Wrap SignalEvent() + * + */ +static EFI_STATUS EFIAPI +efi_signal_event_wrapper ( EFI_EVENT event ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC2 ( colour, "SignalEvent ( %p ) ", event ); + efirc = bs->SignalEvent ( event ); + DBGC2 ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap CloseEvent() + * + */ +static EFI_STATUS EFIAPI +efi_close_event_wrapper ( EFI_EVENT event ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "CloseEvent ( %p ) ", event ); + efirc = bs->CloseEvent ( event ); + DBGC ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap CheckEvent() + * + */ +static EFI_STATUS EFIAPI +efi_check_event_wrapper ( EFI_EVENT event ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGCP ( colour, "CheckEvent ( %p ) ", event ); + efirc = bs->CheckEvent ( event ); + DBGCP ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + /** * Wrap InstallProtocolInterface() * @@ -194,6 +540,25 @@ efi_handle_protocol_wrapper ( EFI_HANDLE handle, EFI_GUID *protocol, return efirc; } +/** + * Wrap RegisterProtocolNotify() + * + */ +static EFI_STATUS EFIAPI +efi_register_protocol_notify_wrapper ( EFI_GUID *protocol, EFI_EVENT event, + VOID **registration ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "RegisterProtocolNotify ( %s, %p ) ", + efi_guid_ntoa ( protocol ), event ); + efirc = bs->RegisterProtocolNotify ( protocol, event, registration ); + DBGC ( colour, "= %s ( %p ) -> %p\n", + efi_status ( efirc
), *registration, retaddr ); + return efirc; +} + /** * Wrap LocateHandle() * @@ -248,6 +613,23 @@ efi_locate_device_path_wrapper ( EFI_GUID *protocol, return efirc; } +/** + * Wrap InstallConfigurationTable() + * + */ +static EFI_STATUS EFIAPI +efi_install_configuration_table_wrapper ( EFI_GUID *guid, VOID *table ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "InstallConfigurationTable ( %s, %p ) ", + efi_guid_ntoa ( guid ), table ); + efirc = bs->InstallConfigurationTable ( guid, table ); + DBGC ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + /** * Wrap LoadImage() * @@ -361,6 +743,61 @@ efi_exit_boot_services_wrapper ( EFI_HANDLE image_handle, UINTN map_key ) { return efirc; } +/** + * Wrap GetNextMonotonicCount() + * + */ +static EFI_STATUS EFIAPI +efi_get_next_monotonic_count_wrapper ( UINT64 *count ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGCP ( colour, "GetNextMonotonicCount() " ); + efirc = bs->GetNextMonotonicCount ( count ); + DBGCP ( colour, "= %s ( %#llx ) -> %p\n", + efi_status ( efirc ), *count, retaddr ); + return efirc; +} + +/** + * Wrap Stall() + * + */ +static EFI_STATUS EFIAPI +efi_stall_wrapper ( UINTN microseconds ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC2 ( colour, "Stall ( %ld.%06lds ) ", + ( ( unsigned long ) ( microseconds / 1000000 ) ), + ( ( unsigned long ) ( microseconds % 1000000 ) ) ); + efirc = bs->Stall ( microseconds ); + DBGC2 ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap SetWatchdogTimer() + * + */ +static EFI_STATUS EFIAPI +efi_set_watchdog_timer_wrapper ( UINTN timeout, UINT64 watchdog_code, + UINTN data_size, CHAR16 *watchdog_data ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "SetWatchdogTimer ( %lds, %#llx, %#llx, %p ) ", + ( ( unsigned long ) timeout ), watchdog_code, + ( ( unsigned long long ) data_size ), watchdog_data ); + efirc = bs->SetWatchdogTimer ( timeout, watchdog_code, data_size, + watchdog_data ); + DBGC ( colour, "= %s -> %p\n", efi_status ( efirc ), retaddr ); + return efirc; +} + /** * Wrap ConnectController() * @@ -462,6 +899,29 @@ efi_close_protocol_wrapper ( EFI_HANDLE handle, EFI_GUID *protocol, return efirc; } +/** + * Wrap OpenProtocolInformation() + * + */ +static EFI_STATUS EFIAPI +efi_open_protocol_information_wrapper ( EFI_HANDLE handle, EFI_GUID *protocol, + EFI_OPEN_PROTOCOL_INFORMATION_ENTRY + **entry_buffer, + UINTN *entry_count ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "OpenProtocolInformation ( %s, %s ) ", + efi_handle_name ( handle ), efi_guid_ntoa ( protocol ) ); + efirc = bs->OpenProtocolInformation ( handle, protocol, entry_buffer, + entry_count ); + DBGC ( colour, "= %s ( %p, %#llx ) -> %p\n", + efi_status ( efirc ), *entry_buffer, + ( ( unsigned long long ) *entry_count ), retaddr ); + return efirc; +} + /** * Wrap ProtocolsPerHandle() * @@ -542,12 +1002,212 @@ efi_locate_protocol_wrapper ( EFI_GUID *protocol, VOID *registration, return efirc; } +/** Maximum number of interfaces for wrapped ...MultipleProtocolInterfaces() */ +#define MAX_WRAP_MULTI 20 + +/** + 
* Wrap InstallMultipleProtocolInterfaces() + * + */ +static EFI_STATUS EFIAPI +efi_install_multiple_protocol_interfaces_wrapper ( EFI_HANDLE *handle, ... ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_GUID *protocol[ MAX_WRAP_MULTI + 1 ]; + VOID *interface[MAX_WRAP_MULTI]; + VA_LIST ap; + unsigned int i; + EFI_STATUS efirc; + + DBGC ( colour, "InstallMultipleProtocolInterfaces ( %s", + efi_handle_name ( *handle ) ); + memset ( protocol, 0, sizeof ( protocol ) ); + memset ( interface, 0, sizeof ( interface ) ); + VA_START ( ap, handle ); + for ( i = 0 ; ( protocol[i] = VA_ARG ( ap, EFI_GUID * ) ) ; i++ ) { + if ( i == MAX_WRAP_MULTI ) { + VA_END ( ap ); + efirc = EFI_OUT_OF_RESOURCES; + DBGC ( colour, " ) = %s " + "-> %p\n", efi_status ( efirc ), retaddr ); + return efirc; + } + interface[i] = VA_ARG ( ap, VOID * ); + DBGC ( colour, ", %s, %p", + efi_guid_ntoa ( protocol[i] ), interface[i] ); + } + VA_END ( ap ); + DBGC ( colour, " ) " ); + efirc = bs->InstallMultipleProtocolInterfaces ( handle, + protocol[0], interface[0], protocol[1], interface[1], + protocol[2], interface[2], protocol[3], interface[3], + protocol[4], interface[4], protocol[5], interface[5], + protocol[6], interface[6], protocol[7], interface[7], + protocol[8], interface[8], protocol[9], interface[9], + protocol[10], interface[10], protocol[11], interface[11], + protocol[12], interface[12], protocol[13], interface[13], + protocol[14], interface[14], protocol[15], interface[15], + protocol[16], interface[16], protocol[17], interface[17], + protocol[18], interface[18], protocol[19], interface[19], + NULL ); + DBGC ( colour, "= %s ( %s ) -> %p\n", + efi_status ( efirc ), efi_handle_name ( *handle ), retaddr ); + return efirc; +} + +/** + * Wrap UninstallMultipleProtocolInterfaces() + * + */ +static EFI_STATUS EFIAPI +efi_uninstall_multiple_protocol_interfaces_wrapper ( EFI_HANDLE handle, ... 
) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_GUID *protocol[ MAX_WRAP_MULTI + 1 ]; + VOID *interface[MAX_WRAP_MULTI]; + VA_LIST ap; + unsigned int i; + EFI_STATUS efirc; + + DBGC ( colour, "UninstallMultipleProtocolInterfaces ( %s", + efi_handle_name ( handle ) ); + memset ( protocol, 0, sizeof ( protocol ) ); + memset ( interface, 0, sizeof ( interface ) ); + VA_START ( ap, handle ); + for ( i = 0 ; ( protocol[i] = VA_ARG ( ap, EFI_GUID * ) ) ; i++ ) { + if ( i == MAX_WRAP_MULTI ) { + VA_END ( ap ); + efirc = EFI_OUT_OF_RESOURCES; + DBGC ( colour, " ) = %s " + "-> %p\n", efi_status ( efirc ), retaddr ); + return efirc; + } + interface[i] = VA_ARG ( ap, VOID * ); + DBGC ( colour, ", %s, %p", + efi_guid_ntoa ( protocol[i] ), interface[i] ); + } + VA_END ( ap ); + DBGC ( colour, " ) " ); + efirc = bs->UninstallMultipleProtocolInterfaces ( handle, + protocol[0], interface[0], protocol[1], interface[1], + protocol[2], interface[2], protocol[3], interface[3], + protocol[4], interface[4], protocol[5], interface[5], + protocol[6], interface[6], protocol[7], interface[7], + protocol[8], interface[8], protocol[9], interface[9], + protocol[10], interface[10], protocol[11], interface[11], + protocol[12], interface[12], protocol[13], interface[13], + protocol[14], interface[14], protocol[15], interface[15], + protocol[16], interface[16], protocol[17], interface[17], + protocol[18], interface[18], protocol[19], interface[19], + NULL ); + DBGC ( colour, "= %s -> %p\n", + efi_status ( efirc ), retaddr ); + return efirc; +} + +/** + * Wrap CreateEventEx() + * + */ +static EFI_STATUS EFIAPI +efi_create_event_ex_wrapper ( UINT32 type, EFI_TPL notify_tpl, + EFI_EVENT_NOTIFY notify_function, + CONST VOID *notify_context, + CONST EFI_GUID *event_group, EFI_EVENT *event ) { + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + void *retaddr = __builtin_return_address ( 0 ); + EFI_STATUS efirc; + + DBGC ( colour, "CreateEventEx ( %#x, %s, %p, %p, %s ) ", + type, efi_tpl ( notify_tpl ), notify_function, notify_context, + efi_guid_ntoa ( event_group ) ); + efirc = bs->CreateEventEx ( type, notify_tpl, notify_function, + notify_context, event_group, event ); + DBGC ( colour, "= %s ( %p ) -> %p\n", + efi_status ( efirc ), *event, retaddr ); + return efirc; +} + +/** + * Build table wrappers + * + * @ret systab Wrapped system table + */ +EFI_SYSTEM_TABLE * efi_wrap_systab ( void ) { + static EFI_SYSTEM_TABLE efi_systab_wrapper; + static EFI_BOOT_SERVICES efi_bs_wrapper; + EFI_BOOT_SERVICES *bs = efi_systab->BootServices; + + /* Build boot services table wrapper */ + memcpy ( &efi_bs_wrapper, bs, sizeof ( efi_bs_wrapper ) ); + efi_bs_wrapper.RaiseTPL = efi_raise_tpl_wrapper; + efi_bs_wrapper.RestoreTPL = efi_restore_tpl_wrapper; + efi_bs_wrapper.AllocatePages = efi_allocate_pages_wrapper; + efi_bs_wrapper.FreePages = efi_free_pages_wrapper; + efi_bs_wrapper.GetMemoryMap = efi_get_memory_map_wrapper; + efi_bs_wrapper.AllocatePool = efi_allocate_pool_wrapper; + efi_bs_wrapper.FreePool = efi_free_pool_wrapper; + efi_bs_wrapper.CreateEvent = efi_create_event_wrapper; + efi_bs_wrapper.SetTimer = efi_set_timer_wrapper; + efi_bs_wrapper.WaitForEvent = efi_wait_for_event_wrapper; + efi_bs_wrapper.SignalEvent = efi_signal_event_wrapper; + efi_bs_wrapper.CloseEvent = efi_close_event_wrapper; + efi_bs_wrapper.CheckEvent = efi_check_event_wrapper; + efi_bs_wrapper.InstallProtocolInterface + = efi_install_protocol_interface_wrapper; + 
efi_bs_wrapper.ReinstallProtocolInterface + = efi_reinstall_protocol_interface_wrapper; + efi_bs_wrapper.UninstallProtocolInterface + = efi_uninstall_protocol_interface_wrapper; + efi_bs_wrapper.HandleProtocol = efi_handle_protocol_wrapper; + efi_bs_wrapper.RegisterProtocolNotify + = efi_register_protocol_notify_wrapper; + efi_bs_wrapper.LocateHandle = efi_locate_handle_wrapper; + efi_bs_wrapper.LocateDevicePath = efi_locate_device_path_wrapper; + efi_bs_wrapper.InstallConfigurationTable + = efi_install_configuration_table_wrapper; + efi_bs_wrapper.LoadImage = efi_load_image_wrapper; + efi_bs_wrapper.StartImage = efi_start_image_wrapper; + efi_bs_wrapper.Exit = efi_exit_wrapper; + efi_bs_wrapper.UnloadImage = efi_unload_image_wrapper; + efi_bs_wrapper.ExitBootServices = efi_exit_boot_services_wrapper; + efi_bs_wrapper.GetNextMonotonicCount + = efi_get_next_monotonic_count_wrapper; + efi_bs_wrapper.Stall = efi_stall_wrapper; + efi_bs_wrapper.SetWatchdogTimer = efi_set_watchdog_timer_wrapper; + efi_bs_wrapper.ConnectController + = efi_connect_controller_wrapper; + efi_bs_wrapper.DisconnectController + = efi_disconnect_controller_wrapper; + efi_bs_wrapper.OpenProtocol = efi_open_protocol_wrapper; + efi_bs_wrapper.CloseProtocol = efi_close_protocol_wrapper; + efi_bs_wrapper.OpenProtocolInformation + = efi_open_protocol_information_wrapper; + efi_bs_wrapper.ProtocolsPerHandle + = efi_protocols_per_handle_wrapper; + efi_bs_wrapper.LocateHandleBuffer + = efi_locate_handle_buffer_wrapper; + efi_bs_wrapper.LocateProtocol = efi_locate_protocol_wrapper; + efi_bs_wrapper.InstallMultipleProtocolInterfaces + = efi_install_multiple_protocol_interfaces_wrapper; + efi_bs_wrapper.UninstallMultipleProtocolInterfaces + = efi_uninstall_multiple_protocol_interfaces_wrapper; + efi_bs_wrapper.CreateEventEx = efi_create_event_ex_wrapper; + + /* Build system table wrapper */ + memcpy ( &efi_systab_wrapper, efi_systab, + sizeof ( efi_systab_wrapper ) ); + efi_systab_wrapper.BootServices = &efi_bs_wrapper; + + return &efi_systab_wrapper; +} + /** * Wrap the calls made by a loaded image * * @v handle Image handle */ - void efi_wrap ( EFI_HANDLE handle ) { +void efi_wrap ( EFI_HANDLE handle ) { EFI_BOOT_SERVICES *bs = efi_systab->BootServices; union { EFI_LOADED_IMAGE_PROTOCOL *image; @@ -560,37 +1220,6 @@ efi_locate_protocol_wrapper ( EFI_GUID *protocol, VOID *registration, if ( ! 
DBG_LOG ) return; - /* Populate table wrappers */ - memcpy ( &efi_systab_wrapper, efi_systab, - sizeof ( efi_systab_wrapper ) ); - memcpy ( &efi_bs_wrapper, bs, sizeof ( efi_bs_wrapper ) ); - efi_systab_wrapper.BootServices = &efi_bs_wrapper; - efi_bs_wrapper.InstallProtocolInterface - = efi_install_protocol_interface_wrapper; - efi_bs_wrapper.ReinstallProtocolInterface - = efi_reinstall_protocol_interface_wrapper; - efi_bs_wrapper.UninstallProtocolInterface - = efi_uninstall_protocol_interface_wrapper; - efi_bs_wrapper.HandleProtocol = efi_handle_protocol_wrapper; - efi_bs_wrapper.LocateHandle = efi_locate_handle_wrapper; - efi_bs_wrapper.LocateDevicePath = efi_locate_device_path_wrapper; - efi_bs_wrapper.LoadImage = efi_load_image_wrapper; - efi_bs_wrapper.StartImage = efi_start_image_wrapper; - efi_bs_wrapper.Exit = efi_exit_wrapper; - efi_bs_wrapper.UnloadImage = efi_unload_image_wrapper; - efi_bs_wrapper.ExitBootServices = efi_exit_boot_services_wrapper; - efi_bs_wrapper.ConnectController - = efi_connect_controller_wrapper; - efi_bs_wrapper.DisconnectController - = efi_disconnect_controller_wrapper; - efi_bs_wrapper.OpenProtocol = efi_open_protocol_wrapper; - efi_bs_wrapper.CloseProtocol = efi_close_protocol_wrapper; - efi_bs_wrapper.ProtocolsPerHandle - = efi_protocols_per_handle_wrapper; - efi_bs_wrapper.LocateHandleBuffer - = efi_locate_handle_buffer_wrapper; - efi_bs_wrapper.LocateProtocol = efi_locate_protocol_wrapper; - /* Open loaded image protocol */ if ( ( efirc = bs->OpenProtocol ( handle, &efi_loaded_image_protocol_guid, @@ -603,7 +1232,7 @@ efi_locate_protocol_wrapper ( EFI_GUID *protocol, VOID *registration, } /* Provide system table wrapper to image */ - loaded.image->SystemTable = &efi_systab_wrapper; + loaded.image->SystemTable = efi_wrap_systab(); DBGC ( colour, "WRAP %s at base %p has protocols:\n", efi_handle_name ( handle ), loaded.image->ImageBase ); DBGC_EFI_PROTOCOLS ( colour, handle ); diff --git a/src/interface/efi/efidrvprefix.c b/src/interface/efi/efidrvprefix.c index 4fbb19ff7..9ca54ff4f 100644 --- a/src/interface/efi/efidrvprefix.c +++ b/src/interface/efi/efidrvprefix.c @@ -34,16 +34,26 @@ FILE_LICENCE ( GPL2_OR_LATER ); */ EFI_STATUS EFIAPI _efidrv_start ( EFI_HANDLE image_handle, EFI_SYSTEM_TABLE *systab ) { + static struct efi_saved_tpl tpl; /* avoid triggering stack protector */ EFI_STATUS efirc; + /* Initialise stack cookie */ + efi_init_stack_guard ( image_handle ); + /* Initialise EFI environment */ if ( ( efirc = efi_init ( image_handle, systab ) ) != 0 ) return efirc; + /* Raise TPL */ + efi_raise_tpl ( &tpl ); + /* Initialise iPXE environment */ initialise(); startup(); + /* Restore TPL */ + efi_restore_tpl ( &tpl ); + return 0; } diff --git a/src/interface/efi/efiprefix.c b/src/interface/efi/efiprefix.c index de3572c75..126c813d7 100644 --- a/src/interface/efi/efiprefix.c +++ b/src/interface/efi/efiprefix.c @@ -22,12 +22,15 @@ FILE_LICENCE ( GPL2_OR_LATER ); #include #include #include +#include #include #include #include #include +#include +#include #include -#include +#include /** * EFI entry point @@ -41,13 +44,13 @@ EFI_STATUS EFIAPI _efi_start ( EFI_HANDLE image_handle, EFI_STATUS efirc; int rc; + /* Initialise stack cookie */ + efi_init_stack_guard ( image_handle ); + /* Initialise EFI environment */ if ( ( efirc = efi_init ( image_handle, systab ) ) != 0 ) goto err_init; - /* Record autoboot device (if any) */ - efi_set_autoboot(); - /* Claim SNP devices for use by iPXE */ efi_snp_claim(); @@ -69,6 +72,28 @@ EFI_STATUS EFIAPI _efi_start ( 
EFI_HANDLE image_handle, return efirc; } +/** + * Initialise EFI application + * + */ +static void efi_init_application ( void ) { + EFI_HANDLE device = efi_loaded_image->DeviceHandle; + + /* Identify autoboot device, if any */ + efi_set_autoboot_ll_addr ( device ); + + /* Store cached DHCP packet, if any */ + efi_cachedhcp_record ( device ); + + /* Load autoexec script, if any */ + efi_autoexec_load ( device ); +} + +/** EFI application initialisation function */ +struct init_fn efi_init_application_fn __init_fn ( INIT_NORMAL ) = { + .initialise = efi_init_application, +}; + /** * Probe EFI root bus * @@ -76,8 +101,8 @@ EFI_STATUS EFIAPI _efi_start ( EFI_HANDLE image_handle, */ static int efi_probe ( struct root_device *rootdev __unused ) { - /* Unloaded any blacklisted drivers */ - efi_unload_blacklist(); + /* Remove any vetoed drivers */ + efi_veto(); /* Connect our drivers */ return efi_driver_connect_all(); diff --git a/src/interface/hyperv/vmbus.c b/src/interface/hyperv/vmbus.c index e50fe9951..86d2a08d7 100644 --- a/src/interface/hyperv/vmbus.c +++ b/src/interface/hyperv/vmbus.c @@ -434,7 +434,7 @@ int vmbus_open ( struct vmbus_device *vmdev, len = ( sizeof ( *vmdev->out ) + out_len + sizeof ( *vmdev->in ) + in_len ); assert ( ( len % PAGE_SIZE ) == 0 ); - ring = malloc_dma ( len, PAGE_SIZE ); + ring = malloc_phys ( len, PAGE_SIZE ); if ( ! ring ) { rc = -ENOMEM; goto err_alloc_ring; @@ -509,7 +509,7 @@ int vmbus_open ( struct vmbus_device *vmdev, err_post_message: vmbus_gpadl_teardown ( vmdev, vmdev->gpadl ); err_establish: - free_dma ( ring, len ); + free_phys ( ring, len ); err_alloc_ring: free ( packet ); err_alloc_packet: @@ -555,7 +555,7 @@ void vmbus_close ( struct vmbus_device *vmdev ) { /* Free ring buffer */ len = ( sizeof ( *vmdev->out ) + vmdev->out_len + sizeof ( *vmdev->in ) + vmdev->in_len ); - free_dma ( vmdev->out, len ); + free_phys ( vmdev->out, len ); vmdev->out = NULL; vmdev->in = NULL; diff --git a/src/interface/linux/linux_acpi.c b/src/interface/linux/linux_acpi.c new file mode 100644 index 000000000..e658936f2 --- /dev/null +++ b/src/interface/linux/linux_acpi.c @@ -0,0 +1,148 @@ +/* + * Copyright (C) 2021 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
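(Aside: the efi_init_application hook above is registered through iPXE's init_fn linker-table mechanism, exactly as shown in the efi_init_application_fn definition. A minimal sketch of registering such a hook, with a hypothetical my_init function, might look like this.)

#include <ipxe/init.h>

/* Hypothetical one-time setup hook (illustrative only) */
static void my_init ( void ) {
	/* ... one-time setup work ... */
}

/* Register the hook in the INIT_NORMAL stage via the linker table */
struct init_fn my_init_fn __init_fn ( INIT_NORMAL ) = {
	.initialise = my_init,
};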
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** ACPI sysfs directory */ +#define ACPI_SYSFS_PREFIX "/sys/firmware/acpi/tables/" + +/** A cached ACPI table */ +struct linux_acpi_table { + /** List of cached tables */ + struct list_head list; + /** Signature */ + uint32_t signature; + /** Index */ + unsigned int index; + /** Cached data */ + userptr_t data; +}; + +/** List of cached ACPI tables */ +static LIST_HEAD ( linux_acpi_tables ); + +/** + * Locate ACPI table + * + * @v signature Requested table signature + * @v index Requested index of table with this signature + * @ret table Table, or UNULL if not found + */ +static userptr_t linux_acpi_find ( uint32_t signature, unsigned int index ) { + struct linux_acpi_table *table; + struct acpi_header *header; + union { + uint32_t signature; + char filename[5]; + } u; + static const char prefix[] = ACPI_SYSFS_PREFIX; + char filename[ sizeof ( prefix ) - 1 /* NUL */ + 4 /* signature */ + + 3 /* "999" */ + 1 /* NUL */ ]; + int len; + int rc; + + /* Check for existing table */ + list_for_each_entry ( table, &linux_acpi_tables, list ) { + if ( ( table->signature == signature ) && + ( table->index == index ) ) + return table->data; + } + + /* Allocate a new table */ + table = malloc ( sizeof ( *table ) ); + if ( ! table ) + goto err_alloc; + table->signature = signature; + table->index = index; + + /* Construct filename (including numeric suffix) */ + memset ( &u, 0, sizeof ( u ) ); + u.signature = le32_to_cpu ( signature ); + snprintf ( filename, sizeof ( filename ), "%s%s%d", prefix, + u.filename, ( index + 1 ) ); + + /* Read file (with or without numeric suffix for index 0) */ + len = linux_sysfs_read ( filename, &table->data ); + if ( ( len < 0 ) && ( index == 0 ) ) { + filename[ sizeof ( prefix ) - 1 /* NUL */ + + 4 /* signature */ ] = '\0'; + len = linux_sysfs_read ( filename, &table->data ); + } + if ( len < 0 ) { + rc = len; + DBGC ( &linux_acpi_tables, "ACPI could not read %s: %s\n", + filename, strerror ( rc ) ); + goto err_read; + } + header = user_to_virt ( table->data, 0 ); + if ( ( ( ( size_t ) len ) < sizeof ( *header ) ) || + ( ( ( size_t ) len ) < le32_to_cpu ( header->length ) ) ) { + rc = -ENOENT; + DBGC ( &linux_acpi_tables, "ACPI underlength %s (%d bytes)\n", + filename, len ); + goto err_len; + } + + /* Add to list of tables */ + list_add ( &table->list, &linux_acpi_tables ); + DBGC ( &linux_acpi_tables, "ACPI cached %s\n", filename ); + + return table->data; + + err_len: + ufree ( table->data ); + err_read: + free ( table ); + err_alloc: + return UNULL; +} + +/** + * Free cached ACPI data + * + */ +static void linux_acpi_shutdown ( int booting __unused ) { + struct linux_acpi_table *table; + struct linux_acpi_table *tmp; + + list_for_each_entry_safe ( table, tmp, &linux_acpi_tables, list ) { + list_del ( &table->list ); + ufree ( table->data ); + free ( table ); + } +} + +/** ACPI shutdown function */ +struct startup_fn linux_acpi_startup_fn __startup_fn ( STARTUP_NORMAL ) = { + .name = "linux_acpi", + .shutdown = linux_acpi_shutdown, +}; + +PROVIDE_ACPI ( linux, acpi_find, linux_acpi_find ); diff --git a/src/interface/linux/linux_api.c b/src/interface/linux/linux_api.c new file mode 100644 index 000000000..21024ede1 --- /dev/null +++ b/src/interface/linux/linux_api.c @@ -0,0 +1,525 @@ +/* + * Copyright (C) 2010 Piotr Jaroszyński . + * Copyright (C) 2021 Michael Brown . 
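(Aside: a standalone sketch of the sysfs naming logic that linux_acpi_find() implements above. The helper name and buffer handling here are illustrative, not part of iPXE: the primary candidate carries a 1-based numeric suffix, and the first instance may also be exposed without any suffix.)

#include <stdio.h>

/* Illustrative only: candidate sysfs paths for ACPI table <sig>, <index> */
static void acpi_sysfs_names ( const char sig[4], unsigned int index,
			       char *primary, char *fallback, size_t len ) {
	/* Primary candidate, e.g. "/sys/firmware/acpi/tables/SSDT1" */
	snprintf ( primary, len, "/sys/firmware/acpi/tables/%.4s%d",
		   sig, ( index + 1 ) );
	/* First instance may appear without a suffix, e.g. ".../SSDT" */
	if ( index == 0 ) {
		snprintf ( fallback, len,
			   "/sys/firmware/acpi/tables/%.4s", sig );
	} else {
		fallback[0] = '\0';
	}
}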
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef HAVE_LIBSLIRP +#include +#endif + +#undef static_assert +#define static_assert(x) _Static_assert(x, #x) + +/** @file + * + * Linux host API + * + */ + +/** Construct prefixed symbol name */ +#define _C1( x, y ) x ## y +#define _C2( x, y ) _C1 ( x, y ) + +/** Construct prefixed symbol name for iPXE symbols */ +#define IPXE_SYM( symbol ) _C2 ( SYMBOL_PREFIX, symbol ) + +/** Provide a prefixed symbol alias visible to iPXE code */ +#define PROVIDE_IPXE_SYM( symbol ) \ + extern typeof ( symbol ) IPXE_SYM ( symbol ) \ + __attribute__ (( alias ( #symbol) )) + +/** Most recent system call error */ +int linux_errno __attribute__ (( nocommon )); + +/****************************************************************************** + * + * Host entry point + * + ****************************************************************************** + */ + +extern int IPXE_SYM ( _linux_start ) ( int argc, char **argv ); + +/** + * Main entry point + * + * @v argc Argument count + * @v argv Argument list + * @ret rc Exit status + */ +int main ( int argc, char **argv ) { + + return IPXE_SYM ( _linux_start ) ( argc, argv ); +} + +/****************************************************************************** + * + * System call wrappers + * + ****************************************************************************** + */ + +/** + * Wrap open() + * + */ +int __asmcall linux_open ( const char *pathname, int flags, ... ) { + va_list args; + mode_t mode; + int ret; + + va_start ( args, flags ); + mode = va_arg ( args, mode_t ); + va_end ( args ); + ret = open ( pathname, flags, mode ); + if ( ret == -1 ) + linux_errno = errno; + return ret; +} + +/** + * Wrap close() + * + */ +int __asmcall linux_close ( int fd ) { + int ret; + + ret = close ( fd ); + if ( ret == -1 ) + linux_errno = errno; + return ret; +} + +/** + * Wrap lseek() + * + */ +off_t __asmcall linux_lseek ( int fd, off_t offset, int whence ) { + off_t ret; + + ret = lseek ( fd, offset, whence ); + if ( ret == -1 ) + linux_errno = errno; + return ret; +} + +/** + * Wrap read() + * + */ +ssize_t __asmcall linux_read ( int fd, void *buf, size_t count ) { + ssize_t ret; + + ret = read ( fd, buf, count ); + if ( ret == -1 ) + linux_errno = errno; + return ret; +} + +/** + * Wrap write() + * + */ +ssize_t __asmcall linux_write ( int fd, const void *buf, size_t count ) { + ssize_t ret; + + ret = write ( fd, buf, count ); + if ( ret == -1 ) + linux_errno = errno; + return ret; +} + +/** + * Wrap fcntl() + * + */ +int __asmcall linux_fcntl ( int fd, int cmd, ... 
) { + va_list args; + long arg; + int ret; + + va_start ( args, cmd ); + arg = va_arg ( args, long ); + va_end ( args ); + ret = fcntl ( fd, cmd, arg ); + if ( ret == -1 ) + linux_errno = errno; + return ret; +} + +/** + * Wrap ioctl() + * + */ +int __asmcall linux_ioctl ( int fd, unsigned long request, ... ) { + va_list args; + void *arg; + int ret; + + va_start ( args, request ); + arg = va_arg ( args, void * ); + va_end ( args ); + ret = ioctl ( fd, request, arg ); + if ( ret == -1 ) + linux_errno = errno; + return ret; +} + +/** + * Wrap part of fstat() + * + */ +int __asmcall linux_fstat_size ( int fd, size_t *size ) { + struct stat stat; + int ret; + + ret = fstat ( fd, &stat ); + *size = stat.st_size; + if ( ret == -1 ) + linux_errno = errno; + return ret; +} + +/** + * Wrap poll() + * + */ +int __asmcall linux_poll ( struct pollfd *fds, unsigned int nfds, + int timeout ) { + int ret; + + ret = poll ( fds, nfds, timeout ); + if ( ret == -1 ) + linux_errno = errno; + return ret; +} + +/** + * Wrap nanosleep() + * + */ +int __asmcall linux_nanosleep ( const struct timespec *req, + struct timespec *rem ) { + int ret; + + ret = nanosleep ( req, rem ); + if ( ret == -1 ) + linux_errno = errno; + return ret; +} + +/** + * Wrap usleep() + * + */ +int __asmcall linux_usleep ( unsigned int usec ) { + int ret; + + ret = usleep ( usec ); + if ( ret == -1 ) + linux_errno = errno; + return ret; +} + +/** + * Wrap gettimeofday() + * + */ +int __asmcall linux_gettimeofday ( struct timeval *tv, struct timezone *tz ) { + int ret; + + ret = gettimeofday ( tv, tz ); + if ( ret == -1 ) + linux_errno = errno; + return ret; +} + +/** + * Wrap mmap() + * + */ +void * __asmcall linux_mmap ( void *addr, size_t length, int prot, int flags, + int fd, off_t offset ) { + void *ret; + + ret = mmap ( addr, length, prot, flags, fd, offset ); + if ( ret == MAP_FAILED ) + linux_errno = errno; + return ret; +} + +/** + * Wrap mremap() + * + */ +void * __asmcall linux_mremap ( void *old_address, size_t old_size, + size_t new_size, int flags, ...
) { + va_list args; + void *new_address; + void *ret; + + va_start ( args, flags ); + new_address = va_arg ( args, void * ); + va_end ( args ); + ret = mremap ( old_address, old_size, new_size, flags, new_address ); + if ( ret == MAP_FAILED ) + linux_errno = errno; + return ret; +} + +/** + * Wrap munmap() + * + */ +int __asmcall linux_munmap ( void *addr, size_t length ) { + int ret; + + ret = munmap ( addr, length ); + if ( ret == -1 ) + linux_errno = errno; + return ret; +} + +/** + * Wrap socket() + * + */ +int __asmcall linux_socket ( int domain, int type, int protocol ) { + int ret; + + ret = socket ( domain, type, protocol ); + if ( ret == -1 ) + linux_errno = errno; + return ret; +} + +/** + * Wrap bind() + * + */ +int __asmcall linux_bind ( int sockfd, const struct sockaddr *addr, + size_t addrlen ) { + int ret; + + ret = bind ( sockfd, addr, addrlen ); + if ( ret == -1 ) + linux_errno = errno; + return ret; +} + +/** + * Wrap sendto() + * + */ +ssize_t __asmcall linux_sendto ( int sockfd, const void *buf, size_t len, + int flags, const struct sockaddr *dest_addr, + size_t addrlen ) { + ssize_t ret; + + ret = sendto ( sockfd, buf, len, flags, dest_addr, addrlen ); + if ( ret == -1 ) + linux_errno = errno; + return ret; +} + +/****************************************************************************** + * + * C library wrappers + * + ****************************************************************************** + */ + +/** + * Wrap strerror() + * + */ +const char * __asmcall linux_strerror ( int linux_errno ) { + + return strerror ( linux_errno ); +} + +/****************************************************************************** + * + * libslirp wrappers + * + ****************************************************************************** + */ + +#ifdef HAVE_LIBSLIRP + +/** + * Wrap slirp_new() + * + */ +struct Slirp * __asmcall +linux_slirp_new ( const struct slirp_config *config, + const struct slirp_callbacks *callbacks, void *opaque ) { + const union { + struct slirp_callbacks callbacks; + SlirpCb cb; + } *u = ( ( typeof ( u ) ) callbacks ); + SlirpConfig cfg; + Slirp *slirp; + + /* Translate configuration */ + memset ( &cfg, 0, sizeof ( cfg ) ); + cfg.version = config->version; + cfg.restricted = config->restricted; + cfg.in_enabled = config->in_enabled; + cfg.vnetwork = config->vnetwork; + cfg.vnetmask = config->vnetmask; + cfg.vhost = config->vhost; + cfg.in6_enabled = config->in6_enabled; + memcpy ( &cfg.vprefix_addr6, &config->vprefix_addr6, + sizeof ( cfg.vprefix_addr6 ) ); + cfg.vprefix_len = config->vprefix_len; + memcpy ( &cfg.vhost6, &config->vhost6, sizeof ( cfg.vhost6 ) ); + cfg.vhostname = config->vhostname; + cfg.tftp_server_name = config->tftp_server_name; + cfg.tftp_path = config->tftp_path; + cfg.bootfile = config->bootfile; + cfg.vdhcp_start = config->vdhcp_start; + cfg.vnameserver = config->vnameserver; + memcpy ( &cfg.vnameserver6, &config->vnameserver6, + sizeof ( cfg.vnameserver6 ) ); + cfg.vdnssearch = config->vdnssearch; + cfg.vdomainname = config->vdomainname; + cfg.if_mtu = config->if_mtu; + cfg.if_mru = config->if_mru; + cfg.disable_host_loopback = config->disable_host_loopback; + cfg.enable_emu = config->enable_emu; + + /* Validate callback structure */ + static_assert ( &u->cb.send_packet == &u->callbacks.send_packet ); + static_assert ( &u->cb.guest_error == &u->callbacks.guest_error ); + static_assert ( &u->cb.clock_get_ns == &u->callbacks.clock_get_ns ); + static_assert ( &u->cb.timer_new == &u->callbacks.timer_new ); + static_assert ( 
&u->cb.timer_free == &u->callbacks.timer_free ); + static_assert ( &u->cb.timer_mod == &u->callbacks.timer_mod ); + static_assert ( &u->cb.register_poll_fd == + &u->callbacks.register_poll_fd ); + static_assert ( &u->cb.unregister_poll_fd == + &u->callbacks.unregister_poll_fd ); + static_assert ( &u->cb.notify == &u->callbacks.notify ); + + /* Create device */ + slirp = slirp_new ( &cfg, &u->cb, opaque ); + + return slirp; +} + +/** + * Wrap slirp_cleanup() + * + */ +void __asmcall linux_slirp_cleanup ( struct Slirp *slirp ) { + + slirp_cleanup ( slirp ); +} + +/** + * Wrap slirp_input() + * + */ +void __asmcall linux_slirp_input ( struct Slirp *slirp, const uint8_t *pkt, + int pkt_len ) { + + slirp_input ( slirp, pkt, pkt_len ); +} + +/** + * Wrap slirp_pollfds_fill() + * + */ +void __asmcall +linux_slirp_pollfds_fill ( struct Slirp *slirp, uint32_t *timeout, + int ( __asmcall * add_poll ) ( int fd, int events, + void *opaque ), + void *opaque ) { + + slirp_pollfds_fill ( slirp, timeout, add_poll, opaque ); +} + +/** + * Wrap slirp_pollfds_poll() + * + */ +void __asmcall +linux_slirp_pollfds_poll ( struct Slirp *slirp, int select_error, + int ( __asmcall * get_revents ) ( int idx, + void *opaque ), + void *opaque ) { + + slirp_pollfds_poll ( slirp, select_error, get_revents, opaque ); +} + +#endif /* HAVE_LIBSLIRP */ + +/****************************************************************************** + * + * Symbol aliases + * + ****************************************************************************** + */ + +PROVIDE_IPXE_SYM ( linux_errno ); +PROVIDE_IPXE_SYM ( linux_open ); +PROVIDE_IPXE_SYM ( linux_close ); +PROVIDE_IPXE_SYM ( linux_lseek ); +PROVIDE_IPXE_SYM ( linux_read ); +PROVIDE_IPXE_SYM ( linux_write ); +PROVIDE_IPXE_SYM ( linux_fcntl ); +PROVIDE_IPXE_SYM ( linux_ioctl ); +PROVIDE_IPXE_SYM ( linux_fstat_size ); +PROVIDE_IPXE_SYM ( linux_poll ); +PROVIDE_IPXE_SYM ( linux_nanosleep ); +PROVIDE_IPXE_SYM ( linux_usleep ); +PROVIDE_IPXE_SYM ( linux_gettimeofday ); +PROVIDE_IPXE_SYM ( linux_mmap ); +PROVIDE_IPXE_SYM ( linux_mremap ); +PROVIDE_IPXE_SYM ( linux_munmap ); +PROVIDE_IPXE_SYM ( linux_socket ); +PROVIDE_IPXE_SYM ( linux_bind ); +PROVIDE_IPXE_SYM ( linux_sendto ); +PROVIDE_IPXE_SYM ( linux_strerror ); + +#ifdef HAVE_LIBSLIRP +PROVIDE_IPXE_SYM ( linux_slirp_new ); +PROVIDE_IPXE_SYM ( linux_slirp_cleanup ); +PROVIDE_IPXE_SYM ( linux_slirp_input ); +PROVIDE_IPXE_SYM ( linux_slirp_pollfds_fill ); +PROVIDE_IPXE_SYM ( linux_slirp_pollfds_poll ); +#endif /* HAVE_LIBSLIRP */ diff --git a/src/interface/linux/linux_console.c b/src/interface/linux/linux_console.c index 5294fca79..d5415b61c 100644 --- a/src/interface/linux/linux_console.c +++ b/src/interface/linux/linux_console.c @@ -28,7 +28,7 @@ FILE_LICENCE(GPL2_OR_LATER); #include #include -#include +#include #include #include diff --git a/src/interface/linux/linux_entropy.c b/src/interface/linux/linux_entropy.c index 0f8e45d36..257e993a0 100644 --- a/src/interface/linux/linux_entropy.c +++ b/src/interface/linux/linux_entropy.c @@ -31,7 +31,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include -#include +#include #include /** Entropy source filename */ diff --git a/src/interface/linux/linux_nap.c b/src/interface/linux/linux_nap.c index f1d3cd962..3e77bc7f1 100644 --- a/src/interface/linux/linux_nap.c +++ b/src/interface/linux/linux_nap.c @@ -21,7 +21,7 @@ FILE_LICENCE(GPL2_OR_LATER); #include -#include +#include /** @file * diff --git a/src/interface/linux/linux_pci.c b/src/interface/linux/linux_pci.c index 
0c140cb89..99c629c19 100644 --- a/src/interface/linux/linux_pci.c +++ b/src/interface/linux/linux_pci.c @@ -26,7 +26,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include +#include #include #include diff --git a/src/interface/linux/linux_smbios.c b/src/interface/linux/linux_smbios.c index 6e5174d23..981873943 100644 --- a/src/interface/linux/linux_smbios.c +++ b/src/interface/linux/linux_smbios.c @@ -20,21 +20,22 @@ FILE_LICENCE ( GPL2_OR_LATER ); #include -#include +#include +#include #include +#include +#include #include +/** SMBIOS entry point filename */ +static const char smbios_entry_filename[] = + "/sys/firmware/dmi/tables/smbios_entry_point"; + /** SMBIOS filename */ -static const char smbios_filename[] = "/dev/mem"; +static const char smbios_filename[] = "/sys/firmware/dmi/tables/DMI"; -/** SMBIOS entry point scan region start address */ -#define SMBIOS_ENTRY_START 0xf0000 - -/** SMBIOS entry point scan region length */ -#define SMBIOS_ENTRY_LEN 0x10000 - -/** SMBIOS mapping alignment */ -#define SMBIOS_ALIGN 0x1000 +/** Cache SMBIOS data */ +static userptr_t smbios_data; /** * Find SMBIOS @@ -43,73 +44,84 @@ static const char smbios_filename[] = "/dev/mem"; * @ret rc Return status code */ static int linux_find_smbios ( struct smbios *smbios ) { - struct smbios_entry entry; - void *entry_mem; - void *smbios_mem; - size_t smbios_offset; - size_t smbios_indent; - size_t smbios_len; - int fd; + struct smbios3_entry *smbios3_entry; + struct smbios_entry *smbios_entry; + userptr_t entry; + void *data; + int len; int rc; - /* Open SMBIOS file */ - fd = linux_open ( smbios_filename, O_RDONLY ); - if ( fd < 0 ) { - rc = -ELINUX ( linux_errno ); - DBGC ( smbios, "SMBIOS could not open %s: %s\n", - smbios_filename, linux_strerror ( linux_errno ) ); - goto err_open; + /* Read entry point file */ + len = linux_sysfs_read ( smbios_entry_filename, &entry ); + if ( len < 0 ) { + rc = len; + DBGC ( smbios, "SMBIOS could not read %s: %s\n", + smbios_entry_filename, strerror ( rc ) ); + goto err_entry; + } + data = user_to_virt ( entry, 0 ); + smbios3_entry = data; + smbios_entry = data; + if ( ( len >= ( ( int ) sizeof ( *smbios3_entry ) ) ) && + ( smbios3_entry->signature == SMBIOS3_SIGNATURE ) ) { + smbios->version = SMBIOS_VERSION ( smbios3_entry->major, + smbios3_entry->minor ); + } else if ( ( len >= ( ( int ) sizeof ( *smbios_entry ) ) ) && + ( smbios_entry->signature == SMBIOS_SIGNATURE ) ) { + smbios->version = SMBIOS_VERSION ( smbios_entry->major, + smbios_entry->minor ); + } else { + DBGC ( smbios, "SMBIOS invalid entry point %s:\n", + smbios_entry_filename ); + DBGC_HDA ( smbios, 0, data, len ); + rc = -EINVAL; + goto err_version; } - /* Map the region potentially containing the SMBIOS entry point */ - entry_mem = linux_mmap ( NULL, SMBIOS_ENTRY_LEN, PROT_READ, MAP_SHARED, - fd, SMBIOS_ENTRY_START ); - if ( entry_mem == MAP_FAILED ) { - rc = -ELINUX ( linux_errno ); - DBGC ( smbios, "SMBIOS could not mmap %s (%#x+%#x): %s\n", - smbios_filename, SMBIOS_ENTRY_START, SMBIOS_ENTRY_LEN, - linux_strerror ( linux_errno ) ); - goto err_mmap_entry; + /* Read SMBIOS file */ + len = linux_sysfs_read ( smbios_filename, &smbios_data ); + if ( len < 0 ) { + rc = len; + DBGC ( smbios, "SMBIOS could not read %s: %s\n", + smbios_filename, strerror ( rc ) ); + goto err_read; } - /* Scan for the SMBIOS entry point */ - if ( ( rc = find_smbios_entry ( virt_to_user ( entry_mem ), - SMBIOS_ENTRY_LEN, &entry ) ) != 0 ) - goto err_find_entry; + /* Populate SMBIOS descriptor */ + 
smbios->address = smbios_data; + smbios->len = len; + smbios->count = 0; - /* Map the region containing the SMBIOS structures */ - smbios_indent = ( entry.smbios_address & ( SMBIOS_ALIGN - 1 ) ); - smbios_offset = ( entry.smbios_address - smbios_indent ); - smbios_len = ( entry.smbios_len + smbios_indent ); - smbios_mem = linux_mmap ( NULL, smbios_len, PROT_READ, MAP_SHARED, - fd, smbios_offset ); - if ( smbios_mem == MAP_FAILED ) { - rc = -ELINUX ( linux_errno ); - DBGC ( smbios, "SMBIOS could not mmap %s (%#zx+%#zx): %s\n", - smbios_filename, smbios_offset, smbios_len, - linux_strerror ( linux_errno ) ); - goto err_mmap_smbios; - } - - /* Fill in entry point descriptor structure */ - smbios->address = virt_to_user ( smbios_mem + smbios_indent ); - smbios->len = entry.smbios_len; - smbios->count = entry.smbios_count; - smbios->version = SMBIOS_VERSION ( entry.major, entry.minor ); - - /* Unmap the entry point region (no longer required) */ - linux_munmap ( entry_mem, SMBIOS_ENTRY_LEN ); + /* Free entry point */ + ufree ( entry ); return 0; - linux_munmap ( smbios_mem, smbios_len ); - err_mmap_smbios: - err_find_entry: - linux_munmap ( entry_mem, SMBIOS_ENTRY_LEN ); - err_mmap_entry: - linux_close ( fd ); - err_open: + ufree ( smbios_data ); + err_read: + err_version: + ufree ( entry ); + err_entry: return rc; } +/** + * Free cached SMBIOS data + * + */ +static void linux_smbios_shutdown ( int booting __unused ) { + + /* Clear SMBIOS data pointer */ + smbios_clear(); + + /* Free SMBIOS data */ + ufree ( smbios_data ); +} + +/** SMBIOS shutdown function */ +struct startup_fn linux_smbios_startup_fn __startup_fn ( STARTUP_NORMAL ) = { + .name = "linux_smbios", + .shutdown = linux_smbios_shutdown, +}; + PROVIDE_SMBIOS ( linux, find_smbios, linux_find_smbios ); diff --git a/src/interface/linux/linux_sysfs.c b/src/interface/linux/linux_sysfs.c new file mode 100644 index 000000000..4f0027cd4 --- /dev/null +++ b/src/interface/linux/linux_sysfs.c @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2021 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
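(Aside: both the ACPI and SMBIOS code above rely on the linux_sysfs_read() helper defined in this new file. A hedged sketch of the calling convention follows; the caller name is hypothetical and the linux_sysfs.h header path is an assumption.)

#include <ipxe/uaccess.h>
#include <ipxe/umalloc.h>
#include <ipxe/linux_sysfs.h>

/* Hypothetical caller: read a sysfs blob, inspect it, release it */
static int read_blob_example ( const char *filename ) {
	userptr_t data;
	int len;

	/* Returns length read on success or a negative iPXE error */
	len = linux_sysfs_read ( filename, &data );
	if ( len < 0 )
		return len;

	/* ... inspect contents via user_to_virt ( data, 0 ) ... */

	/* Caller owns the buffer and must free it */
	ufree ( data );
	return 0;
}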
+ */ + +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include +#include +#include +#include +#include + +/** @file + * + * Linux sysfs files + * + */ + +/** Read blocksize */ +#define LINUX_SYSFS_BLKSIZE 4096 + +/** + * Read file from sysfs + * + * @v filename Filename + * @v data Data to fill in + * @ret len Length read, or negative error + */ +int linux_sysfs_read ( const char *filename, userptr_t *data ) { + userptr_t tmp; + ssize_t read; + size_t len; + int fd; + int rc; + + /* Open file */ + fd = linux_open ( filename, O_RDONLY ); + if ( fd < 0 ) { + rc = -ELINUX ( linux_errno ); + DBGC ( filename, "LINUX could not open %s: %s\n", + filename, linux_strerror ( linux_errno ) ); + goto err_open; + } + + /* Read file */ + for ( *data = UNULL, len = 0 ; ; len += read ) { + + /* (Re)allocate space */ + tmp = urealloc ( *data, ( len + LINUX_SYSFS_BLKSIZE ) ); + if ( ! tmp ) { + rc = -ENOMEM; + goto err_alloc; + } + *data = tmp; + + /* Read from file */ + read = linux_read ( fd, user_to_virt ( *data, len ), + LINUX_SYSFS_BLKSIZE ); + if ( read == 0 ) + break; + if ( read < 0 ) { + rc = -ELINUX ( linux_errno ); + DBGC ( filename, "LINUX could not read %s: %s\n", + filename, linux_strerror ( linux_errno ) ); + goto err_read; + } + } + + /* Close file */ + linux_close ( fd ); + + DBGC ( filename, "LINUX read %s\n", filename ); + return len; + + err_read: + err_alloc: + ufree ( *data ); + linux_close ( fd ); + err_open: + return rc; +} diff --git a/src/interface/linux/linux_time.c b/src/interface/linux/linux_time.c index 9e99fe9cd..9d410f8e0 100644 --- a/src/interface/linux/linux_time.c +++ b/src/interface/linux/linux_time.c @@ -32,7 +32,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include -#include +#include #include /** diff --git a/src/interface/linux/linux_timer.c b/src/interface/linux/linux_timer.c index 9c5e96f2b..418fd046a 100644 --- a/src/interface/linux/linux_timer.c +++ b/src/interface/linux/linux_timer.c @@ -21,7 +21,7 @@ FILE_LICENCE(GPL2_OR_LATER); #include #include -#include +#include /** @file * diff --git a/src/interface/linux/linux_umalloc.c b/src/interface/linux/linux_umalloc.c index aa0052c53..a7250fa5b 100644 --- a/src/interface/linux/linux_umalloc.c +++ b/src/interface/linux/linux_umalloc.c @@ -29,7 +29,7 @@ FILE_LICENCE(GPL2_OR_LATER); #include #include -#include +#include /** Special address returned for empty allocations */ #define NOWHERE ((void *)-1) diff --git a/src/include/hci/linux_args.h b/src/interface/linux/linuxprefix.c similarity index 58% rename from src/include/hci/linux_args.h rename to src/interface/linux/linuxprefix.c index ae1ed0526..f38236202 100644 --- a/src/include/hci/linux_args.h +++ b/src/interface/linux/linuxprefix.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010 Piotr Jaroszyński + * Copyright (C) 2021 Michael Brown . * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -13,19 +13,26 @@ * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. */ -#ifndef _HCI_LINUX_ARGS_H -#define _HCI_LINUX_ARGS_H - -FILE_LICENCE(GPL2_OR_LATER); +#include +#include /** - * Save argc and argv for later access.
+ * Linux entry point * - * To be called by linuxprefix + * @v argc Argument count + * @v argv Argument list + * @ret rc Return status code */ -extern __asmcall void save_args(int argc, char **argv); +int __asmcall _linux_start ( int argc, char **argv ) { -#endif /* _HCI_LINUX_ARGS_H */ + /* Store command-line arguments */ + linux_argc = argc; + linux_argv = argv; + + /* Run iPXE */ + return main(); +} diff --git a/src/interface/smbios/smbios.c b/src/interface/smbios/smbios.c index 1dcf819c2..12a080da2 100644 --- a/src/interface/smbios/smbios.c +++ b/src/interface/smbios/smbios.c @@ -130,8 +130,8 @@ int find_smbios_structure ( unsigned int type, unsigned int instance, assert ( smbios.address != UNULL ); /* Scan through list of structures */ - while ( ( ( offset + sizeof ( structure->header ) ) < smbios.len ) - && ( count < smbios.count ) ) { + while ( ( ( offset + sizeof ( structure->header ) ) < smbios.len ) && + ( ( smbios.count == 0 ) || ( count < smbios.count ) ) ) { /* Read next SMBIOS structure header */ copy_from_user ( &structure->header, smbios.address, offset, @@ -157,6 +157,11 @@ int find_smbios_structure ( unsigned int type, unsigned int instance, "strings length %zx\n", offset, structure->header.type, structure->header.len, structure->strings_len ); + /* Stop if we have reached an end-of-table marker */ + if ( ( smbios.count == 0 ) && + ( structure->header.type == SMBIOS_TYPE_END ) ) + break; + /* If this is the structure we want, return */ if ( ( structure->header.type == type ) && ( instance-- == 0 ) ) { @@ -250,3 +255,13 @@ int smbios_version ( void ) { return smbios.version; } + +/** + * Clear SMBIOS entry point descriptor + * + */ +void smbios_clear ( void ) { + + /* Clear address */ + smbios.address = UNULL; +} diff --git a/src/interface/xen/xenstore.c b/src/interface/xen/xenstore.c index a14881fcd..caeb4e934 100644 --- a/src/interface/xen/xenstore.c +++ b/src/interface/xen/xenstore.c @@ -68,14 +68,14 @@ static void xenstore_send ( struct xen_hypervisor *xen, const void *data, XENSTORE_RING_IDX cons; XENSTORE_RING_IDX idx; const char *bytes = data; - size_t offset = 0; + size_t offset; size_t fill; DBGCP ( intf, "XENSTORE raw request:\n" ); DBGCP_HDA ( intf, MASK_XENSTORE_IDX ( prod ), data, len ); /* Write one byte at a time */ - while ( offset < len ) { + for ( offset = 0 ; offset < len ; offset++ ) { /* Wait for space to become available */ while ( 1 ) { @@ -90,7 +90,7 @@ static void xenstore_send ( struct xen_hypervisor *xen, const void *data, /* Write byte */ idx = MASK_XENSTORE_IDX ( prod++ ); - writeb ( bytes[offset++], &intf->req[idx] ); + writeb ( bytes[offset], &intf->req[idx] ); } /* Update producer counter */ @@ -125,13 +125,13 @@ static void xenstore_recv ( struct xen_hypervisor *xen, void *data, XENSTORE_RING_IDX prod; XENSTORE_RING_IDX idx; char *bytes = data; - size_t offset = 0; + size_t offset; size_t fill; DBGCP ( intf, "XENSTORE raw response:\n" ); /* Read one byte at a time */ - while ( offset < len ) { + for ( offset = 0 ; offset < len ; offset++ ) { /* Wait for data to be ready */ while ( 1 ) { @@ -147,7 +147,7 @@ static void xenstore_recv ( struct xen_hypervisor *xen, void *data, /* Read byte */ idx = MASK_XENSTORE_IDX ( cons++ ); if ( data ) - bytes[offset++] = readb ( &intf->rsp[idx] ); + bytes[offset] = readb ( &intf->rsp[idx] ); } if ( data ) DBGCP_HDA ( intf, MASK_XENSTORE_IDX ( cons - len ), data, len ); diff --git a/src/libgcc/__divmoddi4.c b/src/libgcc/__divmoddi4.c index 95e328d06..c00acb5ab 100644 --- a/src/libgcc/__divmoddi4.c +++ 
b/src/libgcc/__divmoddi4.c @@ -1,6 +1,6 @@ #include "libgcc.h" -__libgcc int64_t __divmoddi4(int64_t num, int64_t den, int64 *rem_p) +__libgcc int64_t __divmoddi4(int64_t num, int64_t den, int64_t *rem_p) { int minus = 0; int64_t v; diff --git a/src/net/80211/wpa.c b/src/net/80211/wpa.c index 77f66d825..1484d0e80 100644 --- a/src/net/80211/wpa.c +++ b/src/net/80211/wpa.c @@ -304,8 +304,9 @@ static void wpa_derive_ptk ( struct wpa_common_ctx *ctx ) memcpy ( ptk_data.nonce2, ctx->Anonce, WPA_NONCE_LEN ); } - DBGC2 ( ctx, "WPA %p A1 %s, A2 %s\n", ctx, eth_ntoa ( ptk_data.mac1 ), - eth_ntoa ( ptk_data.mac2 ) ); + DBGC2 ( ctx, "WPA %p A1 %s", ctx, eth_ntoa ( ptk_data.mac1 ) ); + DBGC2 ( ctx, ", A2 %s\n", eth_ntoa ( ptk_data.mac2 ) ); + DBGC2 ( ctx, "WPA %p Nonce1, Nonce2:\n", ctx ); DBGC2_HD ( ctx, ptk_data.nonce1, WPA_NONCE_LEN ); DBGC2_HD ( ctx, ptk_data.nonce2, WPA_NONCE_LEN ); @@ -413,12 +414,13 @@ static int wpa_maybe_install_gtk ( struct wpa_common_ctx *ctx, static struct io_buffer * wpa_alloc_frame ( int kdlen ) { struct io_buffer *ret = alloc_iob ( sizeof ( struct eapol_key_pkt ) + - kdlen + EAPOL_HDR_LEN + + kdlen + + sizeof ( struct eapol_header ) + MAX_LL_HEADER_LEN ); if ( ! ret ) return NULL; - iob_reserve ( ret, MAX_LL_HEADER_LEN + EAPOL_HDR_LEN ); + iob_reserve ( ret, MAX_LL_HEADER_LEN + sizeof ( struct eapol_header ) ); memset ( iob_put ( ret, sizeof ( struct eapol_key_pkt ) ), 0, sizeof ( struct eapol_key_pkt ) ); @@ -441,19 +443,19 @@ static int wpa_send_eapol ( struct io_buffer *iob, struct wpa_common_ctx *ctx, struct wpa_kie *kie ) { struct eapol_key_pkt *pkt = iob->data; - struct eapol_frame *eapol = iob_push ( iob, EAPOL_HDR_LEN ); + struct eapol_header *eapol = iob_push ( iob, sizeof ( *eapol ) ); pkt->info = htons ( pkt->info ); pkt->keysize = htons ( pkt->keysize ); pkt->datalen = htons ( pkt->datalen ); pkt->replay = cpu_to_be64 ( pkt->replay ); - eapol->version = EAPOL_THIS_VERSION; + eapol->version = EAPOL_VERSION_2001; eapol->type = EAPOL_TYPE_KEY; - eapol->length = htons ( iob->tail - iob->data - sizeof ( *eapol ) ); + eapol->len = htons ( iob->tail - iob->data - sizeof ( *eapol ) ); memset ( pkt->mic, 0, sizeof ( pkt->mic ) ); if ( kie ) - kie->mic ( &ctx->ptk.kck, eapol, EAPOL_HDR_LEN + + kie->mic ( &ctx->ptk.kck, eapol, sizeof ( *eapol ) + sizeof ( *pkt ) + ntohs ( pkt->datalen ), pkt->mic ); @@ -761,21 +763,23 @@ static int wpa_handle_1_of_2 ( struct wpa_common_ctx *ctx, * * @v iob I/O buffer * @v netdev Network device - * @v ll_dest Link-layer destination address * @v ll_source Source link-layer address */ static int eapol_key_rx ( struct io_buffer *iob, struct net_device *netdev, - const void *ll_dest __unused, const void *ll_source ) { struct net80211_device *dev = net80211_get ( netdev ); - struct eapol_key_pkt *pkt = iob->data; + struct eapol_header *eapol; + struct eapol_key_pkt *pkt; int is_rsn, found_ctx; struct wpa_common_ctx *ctx; int rc = 0; struct wpa_kie *kie; u8 their_mic[16], our_mic[16]; + eapol = iob->data; + pkt = ( ( ( void * ) eapol ) + sizeof ( *eapol ) ); + if ( pkt->type != EAPOL_KEY_TYPE_WPA && pkt->type != EAPOL_KEY_TYPE_RSN ) { DBG ( "EAPOL-Key: packet not of 802.11 type\n" ); @@ -839,8 +843,8 @@ static int eapol_key_rx ( struct io_buffer *iob, struct net_device *netdev, if ( ntohs ( pkt->info ) & EAPOL_KEY_INFO_KEY_MIC ) { memcpy ( their_mic, pkt->mic, sizeof ( pkt->mic ) ); memset ( pkt->mic, 0, sizeof ( pkt->mic ) ); - kie->mic ( &ctx->ptk.kck, ( void * ) pkt - EAPOL_HDR_LEN, - EAPOL_HDR_LEN + sizeof ( *pkt ) + + kie->mic ( 
&ctx->ptk.kck, eapol, + sizeof ( *eapol ) + sizeof ( *pkt ) + ntohs ( pkt->datalen ), our_mic ); DBGC2 ( ctx, "WPA %p MIC comparison (theirs, ours):\n", ctx ); DBGC2_HD ( ctx, their_mic, 16 ); diff --git a/src/net/aoe.c b/src/net/aoe.c index 3a6611d04..e785e8979 100644 --- a/src/net/aoe.c +++ b/src/net/aoe.c @@ -42,6 +42,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include #include /** @file @@ -68,35 +69,6 @@ static LIST_HEAD ( aoe_devices ); /** List of active AoE commands */ static LIST_HEAD ( aoe_commands ); -/** An AoE device */ -struct aoe_device { - /** Reference counter */ - struct refcnt refcnt; - - /** Network device */ - struct net_device *netdev; - /** ATA command issuing interface */ - struct interface ata; - - /** Major number */ - uint16_t major; - /** Minor number */ - uint8_t minor; - /** Target MAC address */ - uint8_t target[MAX_LL_ADDR_LEN]; - - /** Saved timeout value */ - unsigned long timeout; - - /** Configuration command interface */ - struct interface config; - /** Device is configued */ - int configured; - - /** ACPI descriptor */ - struct acpi_descriptor desc; -}; - /** An AoE command */ struct aoe_command { /** Reference count */ @@ -811,6 +783,7 @@ static struct interface_operation aoedev_ata_op[] = { INTF_OP ( acpi_describe, struct aoe_device *, aoedev_describe ), INTF_OP ( identify_device, struct aoe_device *, aoedev_identify_device ), + EFI_INTF_OP ( efi_describe, struct aoe_device *, efi_aoe_path ), }; /** AoE device ATA interface descriptor */ diff --git a/src/net/eap.c b/src/net/eap.c new file mode 100644 index 000000000..8d1d540fb --- /dev/null +++ b/src/net/eap.c @@ -0,0 +1,142 @@ +/* + * Copyright (C) 2021 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** @file + * + * Extensible Authentication Protocol + * + */ + +/** + * Handle EAP Request-Identity + * + * @v netdev Network device + * @ret rc Return status code + */ +static int eap_rx_request_identity ( struct net_device *netdev ) { + + /* Treat Request-Identity as blocking the link */ + DBGC ( netdev, "EAP %s Request-Identity blocking link\n", + netdev->name ); + netdev_link_block ( netdev, EAP_BLOCK_TIMEOUT ); + + return 0; +} + +/** + * Handle EAP Request + * + * @v netdev Network device + * @v req EAP request + * @v len Length of EAP request + * @ret rc Return status code + */ +static int eap_rx_request ( struct net_device *netdev, + const struct eap_request *req, size_t len ) { + + /* Sanity check */ + if ( len < sizeof ( *req ) ) { + DBGC ( netdev, "EAP %s underlength request:\n", netdev->name ); + DBGC_HDA ( netdev, 0, req, len ); + return -EINVAL; + } + + /* Handle according to type */ + switch ( req->type ) { + case EAP_TYPE_IDENTITY: + return eap_rx_request_identity ( netdev ); + default: + DBGC ( netdev, "EAP %s requested type %d unknown:\n", + netdev->name, req->type ); + DBGC_HDA ( netdev, 0, req, len ); + return -ENOTSUP; + } +} + +/** + * Handle EAP Success + * + * @v netdev Network device + * @ret rc Return status code + */ +static int eap_rx_success ( struct net_device *netdev ) { + + /* Mark link as unblocked */ + DBGC ( netdev, "EAP %s Success\n", netdev->name ); + netdev_link_unblock ( netdev ); + + return 0; +} + +/** + * Handle EAP Failure + * + * @v netdev Network device + * @ret rc Return status code + */ +static int eap_rx_failure ( struct net_device *netdev ) { + + /* Record error */ + DBGC ( netdev, "EAP %s Failure\n", netdev->name ); + return -EPERM; +} + +/** + * Handle EAP packet + * + * @v netdev Network device + * @v data EAP packet + * @v len Length of EAP packet + * @ret rc Return status code + */ +int eap_rx ( struct net_device *netdev, const void *data, size_t len ) { + const union eap_packet *eap = data; + + /* Sanity check */ + if ( len < sizeof ( eap->hdr ) ) { + DBGC ( netdev, "EAP %s underlength header:\n", netdev->name ); + DBGC_HDA ( netdev, 0, eap, len ); + return -EINVAL; + } + + /* Handle according to code */ + switch ( eap->hdr.code ) { + case EAP_CODE_REQUEST: + return eap_rx_request ( netdev, &eap->req, len ); + case EAP_CODE_SUCCESS: + return eap_rx_success ( netdev ); + case EAP_CODE_FAILURE: + return eap_rx_failure ( netdev ); + default: + DBGC ( netdev, "EAP %s unsupported code %d\n", + netdev->name, eap->hdr.code ); + DBGC_HDA ( netdev, 0, eap, len ); + return -ENOTSUP; + } +} diff --git a/src/net/eapol.c b/src/net/eapol.c index eb0362994..3578f0e37 100644 --- a/src/net/eapol.c +++ b/src/net/eapol.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009 Joshua Oreman . + * Copyright (C) 2021 Michael Brown . * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -15,74 +15,127 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
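(Aside: the code/type dispatch in eap_rx() above follows the standard RFC 3748 framing. The structures below are a hypothetical illustration of that on-wire layout, not iPXE's own definitions.)

#include <stdint.h>

/* Hypothetical illustration of RFC 3748 framing (not iPXE's structs) */
struct example_eap_header {
	uint8_t code;	/* 1=Request, 2=Response, 3=Success, 4=Failure */
	uint8_t id;	/* matches a Response to its Request */
	uint16_t len;	/* network byte order, covers the whole packet */
} __attribute__ (( packed ));

struct example_eap_request {
	struct example_eap_header hdr;
	uint8_t type;	/* e.g. 1 = Identity */
} __attribute__ (( packed ));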
*/ -FILE_LICENCE ( GPL2_OR_LATER ); +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include +#include +#include +#include +#include +#include /** @file * - * 802.1X Extensible Authentication Protocol over LANs demultiplexer + * Extensible Authentication Protocol over LAN (EAPoL) * */ -#include -#include -#include -#include -#include -#include - /** - * Receive EAPOL network-layer packet + * Process EAPoL packet * - * @v iob I/O buffer - * @v netdev Network device - * @v ll_dest Link-layer destination address - * @v ll_source Link-layer source address - * @v flags Packet flags - * - * This function takes ownership of the I/O buffer passed to it. + * @v iobuf I/O buffer + * @v netdev Network device + * @v ll_dest Link-layer destination address + * @v ll_source Link-layer source address + * @v flags Packet flags + * @ret rc Return status code */ -static int eapol_rx ( struct io_buffer *iob, struct net_device *netdev, - const void *ll_dest, const void *ll_source, +static int eapol_rx ( struct io_buffer *iobuf, struct net_device *netdev, + const void *ll_dest __unused, const void *ll_source, unsigned int flags __unused ) { - struct eapol_frame *eapol = iob->data; + struct eapol_header *eapol; struct eapol_handler *handler; + size_t remaining; + size_t len; + int rc; - if ( iob_len ( iob ) < EAPOL_HDR_LEN ) { - free_iob ( iob ); - return -EINVAL; + /* Sanity checks */ + if ( iob_len ( iobuf ) < sizeof ( *eapol ) ) { + DBGC ( netdev, "EAPOL %s underlength header:\n", + netdev->name ); + DBGC_HDA ( netdev, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EINVAL; + goto drop; + } + eapol = iobuf->data; + remaining = ( iob_len ( iobuf ) - sizeof ( *eapol ) ); + len = ntohs ( eapol->len ); + if ( len > remaining ) { + DBGC ( netdev, "EAPOL %s v%d type %d len %zd underlength " + "payload:\n", netdev->name, eapol->version, + eapol->type, len ); + DBGC_HDA ( netdev, 0, iobuf->data, iob_len ( iobuf ) ); + rc = -EINVAL; + goto drop; } + /* Strip any trailing padding */ + iob_unput ( iobuf, ( remaining - len ) ); + + /* Handle according to type */ for_each_table_entry ( handler, EAPOL_HANDLERS ) { if ( handler->type == eapol->type ) { - iob_pull ( iob, EAPOL_HDR_LEN ); - return handler->rx ( iob, netdev, ll_dest, ll_source ); + return handler->rx ( iob_disown ( iobuf ), netdev, + ll_source ); } } + rc = -ENOTSUP; + DBGC ( netdev, "EAPOL %s v%d type %d unsupported\n", + netdev->name, eapol->version, eapol->type ); + DBGC_HDA ( netdev, 0, iobuf->data, iob_len ( iobuf ) ); - free_iob ( iob ); - return -( ENOTSUP | ( ( eapol->type & 0x1f ) << 8 ) ); + drop: + free_iob ( iobuf ); + return rc; } -/** - * Transcribe EAPOL network-layer address - * - * @v net_addr Network-layer address - * @ret str String representation of network-layer address - * - * EAPOL doesn't have network-layer addresses, so we just return the - * string @c "".
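(Aside: the padding strip in eapol_rx() above exists because Ethernet pads short frames to a minimum size, so the length declared in the EAPoL header is authoritative and anything beyond it is discarded. A sketch of the arithmetic, reusing the same names; header names are assumed to match those used by eapol.c itself, and ntohs comes from iPXE's byte-order support.)

#include <ipxe/iobuf.h>
#include <ipxe/eapol.h>

/* Sketch only: trim trailing Ethernet padding from an EAPoL packet */
static void eapol_trim_sketch ( struct io_buffer *iobuf,
				struct eapol_header *eapol ) {
	size_t remaining = ( iob_len ( iobuf ) - sizeof ( *eapol ) );
	size_t len = ntohs ( eapol->len );

	/* Valid only after eapol_rx() has checked that len <= remaining */
	iob_unput ( iobuf, ( remaining - len ) );
}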
- */ -static const char * eapol_ntoa ( const void *net_addr __unused ) -{ - return ""; -} - -/** EAPOL network protocol */ +/** EAPoL protocol */ struct net_protocol eapol_protocol __net_protocol = { .name = "EAPOL", - .rx = eapol_rx, - .ntoa = eapol_ntoa, .net_proto = htons ( ETH_P_EAPOL ), + .rx = eapol_rx, +}; + +/** + * Process EAPoL-encapsulated EAP packet + * + * @v netdev Network device + * @v ll_source Link-layer source address + * @ret rc Return status code + */ +static int eapol_eap_rx ( struct io_buffer *iobuf, struct net_device *netdev, + const void *ll_source __unused ) { + struct eapol_header *eapol; + int rc; + + /* Sanity check */ + assert ( iob_len ( iobuf ) >= sizeof ( *eapol ) ); + + /* Strip EAPoL header */ + eapol = iob_pull ( iobuf, sizeof ( *eapol ) ); + + /* Process EAP packet */ + if ( ( rc = eap_rx ( netdev, iobuf->data, iob_len ( iobuf ) ) ) != 0 ) { + DBGC ( netdev, "EAPOL %s v%d EAP failed: %s\n", + netdev->name, eapol->version, strerror ( rc ) ); + goto drop; + } + + drop: + free_iob ( iobuf ); + return rc; +} + +/** EAPoL handler for EAP packets */ +struct eapol_handler eapol_eap __eapol_handler = { + .type = EAPOL_TYPE_EAP, + .rx = eapol_eap_rx, }; diff --git a/src/net/eth_slow.c b/src/net/eth_slow.c index baa51dbc1..1103a49f3 100644 --- a/src/net/eth_slow.c +++ b/src/net/eth_slow.c @@ -153,6 +153,14 @@ static int eth_slow_lacp_rx ( struct io_buffer *iobuf, eth_slow_lacp_dump ( iobuf, netdev, "RX" ); + /* Check for looped-back packets */ + if ( memcmp ( lacp->actor.system, netdev->ll_addr, + sizeof ( lacp->actor.system ) ) == 0 ) { + DBGC ( netdev, "SLOW %s RX loopback detected\n", + netdev->name ); + return -ELOOP; + } + /* If partner is not in sync, collecting, and distributing, * then block the link until after the next expected LACP * packet. 
@@ -278,6 +286,9 @@ static int eth_slow_rx ( struct io_buffer *iobuf, return -EINVAL; } + /* Strip any trailing padding */ + iob_unput ( iobuf, ( iob_len ( iobuf ) - sizeof ( *eth_slow ) ) ); + /* Handle according to subtype */ switch ( eth_slow->header.subtype ) { case ETH_SLOW_SUBTYPE_LACP: diff --git a/src/net/fcp.c b/src/net/fcp.c index d92cfdcf3..f78f7bd9b 100644 --- a/src/net/fcp.c +++ b/src/net/fcp.c @@ -43,6 +43,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include #include #include #include @@ -158,10 +159,8 @@ struct fcp_device { /** List of active commands */ struct list_head fcpcmds; - /** Fibre Channel WWN (for boot firmware table) */ - struct fc_name wwn; - /** SCSI LUN (for boot firmware table) */ - struct scsi_lun lun; + /** Device description (for boot firmware table) */ + struct fcp_description desc; }; /** An FCP command */ @@ -864,9 +863,9 @@ static int fcpdev_edd_describe ( struct fcp_device *fcpdev, } lun; type->type = cpu_to_le64 ( EDD_INTF_TYPE_FIBRE ); - memcpy ( &wwn.fc, &fcpdev->wwn, sizeof ( wwn.fc ) ); + memcpy ( &wwn.fc, &fcpdev->desc.wwn, sizeof ( wwn.fc ) ); path->fibre.wwn = be64_to_cpu ( wwn.u64 ); - memcpy ( &lun.scsi, &fcpdev->lun, sizeof ( lun.scsi ) ); + memcpy ( &lun.scsi, &fcpdev->desc.lun, sizeof ( lun.scsi ) ); path->fibre.lun = be64_to_cpu ( lun.u64 ); return 0; } @@ -893,6 +892,18 @@ static struct device * fcpdev_identify_device ( struct fcp_device *fcpdev ) { return identify_device ( &fcpdev->user.ulp->peer->port->transport ); } +/** + * Describe as an EFI device path + * + * @v fcp FCP device + * @ret path EFI device path, or NULL on error + */ +static EFI_DEVICE_PATH_PROTOCOL * +fcpdev_efi_describe ( struct fcp_device *fcpdev ) { + + return efi_fcp_path ( &fcpdev->desc ); +} + /** FCP device SCSI interface operations */ static struct interface_operation fcpdev_scsi_op[] = { INTF_OP ( scsi_command, struct fcp_device *, fcpdev_scsi_command ), @@ -901,6 +912,7 @@ static struct interface_operation fcpdev_scsi_op[] = { INTF_OP ( edd_describe, struct fcp_device *, fcpdev_edd_describe ), INTF_OP ( identify_device, struct fcp_device *, fcpdev_identify_device ), + EFI_INTF_OP ( efi_describe, struct fcp_device *, fcpdev_efi_describe ), }; /** FCP device SCSI interface descriptor */ @@ -965,8 +977,8 @@ static int fcpdev_open ( struct interface *parent, struct fc_name *wwn, fc_ulp_attach ( ulp, &fcpdev->user ); /* Preserve parameters required for boot firmware table */ - memcpy ( &fcpdev->wwn, wwn, sizeof ( fcpdev->wwn ) ); - memcpy ( &fcpdev->lun, lun, sizeof ( fcpdev->lun ) ); + memcpy ( &fcpdev->desc.wwn, wwn, sizeof ( fcpdev->desc.wwn ) ); + memcpy ( &fcpdev->desc.lun, lun, sizeof ( fcpdev->desc.lun ) ); + /* Attach SCSI device to parent interface */ if ( ( rc = scsi_open ( parent, &fcpdev->scsi, lun ) ) != 0 ) { diff --git a/src/net/infiniband.c b/src/net/infiniband.c index 3b79a660c..e19e121c1 100644 --- a/src/net/infiniband.c +++ b/src/net/infiniband.c @@ -813,26 +813,6 @@ void ib_mcast_detach ( struct ib_device *ibdev, struct ib_queue_pair *qp, *************************************************************************** */ -/** - * Count Infiniband HCA ports - * - * @v ibdev Infiniband device - * @ret num_ports Number of ports - */ -int ib_count_ports ( struct ib_device *ibdev ) { - struct ib_device *tmp; - int num_ports = 0; - - /* Search for IB devices with the same physical device to - * identify port count.
- */ - for_each_ibdev ( tmp ) { - if ( tmp->dev == ibdev->dev ) - num_ports++; - } - return num_ports; -} - /** * Set port information * diff --git a/src/net/infiniband/ib_sma.c b/src/net/infiniband/ib_sma.c index 24ec9f4e0..b553e66b1 100644 --- a/src/net/infiniband/ib_sma.c +++ b/src/net/infiniband/ib_sma.c @@ -63,7 +63,7 @@ static void ib_sma_node_info ( struct ib_device *ibdev, node_info->base_version = IB_MGMT_BASE_VERSION; node_info->class_version = IB_SMP_CLASS_VERSION; node_info->node_type = IB_NODE_TYPE_HCA; - node_info->num_ports = ib_count_ports ( ibdev ); + node_info->num_ports = ibdev->ports; memcpy ( &node_info->sys_guid, &ibdev->node_guid, sizeof ( node_info->sys_guid ) ); memcpy ( &node_info->node_guid, &ibdev->node_guid, diff --git a/src/net/infiniband/ib_srp.c b/src/net/infiniband/ib_srp.c index cf1ef3bfd..e6b43291f 100644 --- a/src/net/infiniband/ib_srp.c +++ b/src/net/infiniband/ib_srp.c @@ -37,6 +37,7 @@ FILE_LICENCE ( BSD2 ); #include #include #include +#include #include #include #include @@ -69,39 +70,6 @@ struct acpi_model ib_sbft_model __acpi_model; ****************************************************************************** */ -/** - * An IB SRP sBFT created by iPXE - */ -struct ipxe_ib_sbft { - /** The table header */ - struct sbft_table table; - /** The SCSI subtable */ - struct sbft_scsi_subtable scsi; - /** The SRP subtable */ - struct sbft_srp_subtable srp; - /** The Infiniband subtable */ - struct sbft_ib_subtable ib; -}; - -/** An Infiniband SRP device */ -struct ib_srp_device { - /** Reference count */ - struct refcnt refcnt; - - /** SRP transport interface */ - struct interface srp; - /** CMRC interface */ - struct interface cmrc; - - /** Infiniband device */ - struct ib_device *ibdev; - - /** ACPI descriptor */ - struct acpi_descriptor desc; - /** Boot firmware table parameters */ - struct ipxe_ib_sbft sbft; -}; - /** * Free IB SRP device * @@ -153,6 +121,7 @@ static struct interface_descriptor ib_srp_cmrc_desc = static struct interface_operation ib_srp_srp_op[] = { INTF_OP ( acpi_describe, struct ib_srp_device *, ib_srp_describe ), INTF_OP ( intf_close, struct ib_srp_device *, ib_srp_close ), + EFI_INTF_OP ( efi_describe, struct ib_srp_device *, efi_ib_srp_path ), }; /** IB SRP SRP interface descriptor */ @@ -498,14 +467,21 @@ static struct ib_srp_root_path_parser ib_srp_rp_parser[] = { static int ib_srp_parse_root_path ( const char *rp_string, struct ib_srp_root_path *rp ) { struct ib_srp_root_path_parser *parser; - char rp_string_copy[ strlen ( rp_string ) + 1 ]; char *rp_comp[IB_SRP_NUM_RP_COMPONENTS]; - char *rp_string_tmp = rp_string_copy; + char *rp_string_copy; + char *rp_string_tmp; unsigned int i = 0; int rc; + /* Create modifiable copy of root path */ + rp_string_copy = strdup ( rp_string ); + if ( ! rp_string_copy ) { + rc = -ENOMEM; + goto err_strdup; + } + rp_string_tmp = rp_string_copy; + /* Split root path into component parts */ - strcpy ( rp_string_copy, rp_string ); while ( 1 ) { rp_comp[i++] = rp_string_tmp; if ( i == IB_SRP_NUM_RP_COMPONENTS ) @@ -514,7 +490,8 @@ static int ib_srp_parse_root_path ( const char *rp_string, if ( ! 
*rp_string_tmp ) { DBG ( "IBSRP root path \"%s\" too short\n", rp_string ); - return -EINVAL_RP_TOO_SHORT; + rc = -EINVAL_RP_TOO_SHORT; + goto err_split; } } *(rp_string_tmp++) = '\0'; @@ -527,11 +504,15 @@ static int ib_srp_parse_root_path ( const char *rp_string, DBG ( "IBSRP could not parse \"%s\" in root path " "\"%s\": %s\n", rp_comp[i], rp_string, strerror ( rc ) ); - return rc; + goto err_parse; } } - return 0; + err_parse: + err_split: + free ( rp_string_copy ); + err_strdup: + return rc; } /** diff --git a/src/net/ndp.c b/src/net/ndp.c index f28e71cbd..75e531648 100644 --- a/src/net/ndp.c +++ b/src/net/ndp.c @@ -31,6 +31,7 @@ FILE_LICENCE ( GPL2_OR_LATER ); #include #include #include +#include #include /** @file @@ -39,6 +40,18 @@ FILE_LICENCE ( GPL2_OR_LATER ); * */ +/** Router discovery minimum timeout */ +#define IPV6CONF_MIN_TIMEOUT ( TICKS_PER_SEC / 8 ) + +/** Router discovery maximum timeout */ +#define IPV6CONF_MAX_TIMEOUT ( TICKS_PER_SEC * 3 ) + +/** Router discovery blocked link retry timeout */ +#define IPV6CONF_BLOCK_TIMEOUT ( TICKS_PER_SEC ) + +/** Router discovery maximum number of deferrals */ +#define IPV6CONF_MAX_DEFERRALS 180 + static struct ipv6conf * ipv6conf_demux ( struct net_device *netdev ); static int ipv6conf_rx_router_advertisement ( struct net_device *netdev, @@ -1061,6 +1074,9 @@ struct ipv6conf { /** Retransmission timer */ struct retry_timer timer; + + /** Deferred discovery counter */ + unsigned int deferred; }; /** List of IPv6 configurators */ @@ -1124,6 +1140,7 @@ static void ipv6conf_done ( struct ipv6conf *ipv6conf, int rc ) { static void ipv6conf_expired ( struct retry_timer *timer, int fail ) { struct ipv6conf *ipv6conf = container_of ( timer, struct ipv6conf, timer ); + struct net_device *netdev = ipv6conf->netdev; /* If we have failed, terminate autoconfiguration */ if ( fail ) { @@ -1133,7 +1150,15 @@ static void ipv6conf_expired ( struct retry_timer *timer, int fail ) { /* Otherwise, transmit router solicitation and restart timer */ start_timer ( &ipv6conf->timer ); - ndp_tx_router_solicitation ( ipv6conf->netdev ); + ndp_tx_router_solicitation ( netdev ); + + /* If link is blocked, defer router discovery timeout */ + if ( netdev_link_blocked ( netdev ) && + ( ipv6conf->deferred++ <= IPV6CONF_MAX_DEFERRALS ) ) { + DBGC ( netdev, "NDP %s deferring discovery timeout\n", + netdev->name ); + start_timer_fixed ( &ipv6conf->timer, IPV6CONF_BLOCK_TIMEOUT ); + } } /** @@ -1235,6 +1260,8 @@ int start_ipv6conf ( struct interface *job, struct net_device *netdev ) { intf_init ( &ipv6conf->job, &ipv6conf_job_desc, &ipv6conf->refcnt ); intf_init ( &ipv6conf->dhcp, &ipv6conf_dhcp_desc, &ipv6conf->refcnt ); timer_init ( &ipv6conf->timer, ipv6conf_expired, &ipv6conf->refcnt ); + set_timer_limits ( &ipv6conf->timer, IPV6CONF_MIN_TIMEOUT, + IPV6CONF_MAX_TIMEOUT ); ipv6conf->netdev = netdev_get ( netdev ); /* Start timer to initiate router solicitation */ diff --git a/src/net/netdevice.c b/src/net/netdevice.c index 3b02e64bd..5df306e8d 100644 --- a/src/net/netdevice.c +++ b/src/net/netdevice.c @@ -297,24 +297,45 @@ int netdev_tx ( struct net_device *netdev, struct io_buffer *iobuf ) { /* Enqueue packet */ list_add_tail ( &iobuf->list, &netdev->tx_queue ); + /* Guard against re-entry */ + if ( netdev->state & NETDEV_TX_IN_PROGRESS ) { + rc = -EBUSY; + goto err_busy; + } + netdev->state |= NETDEV_TX_IN_PROGRESS; + /* Avoid calling transmit() on unopened network devices */ if ( ! 
netdev_is_open ( netdev ) ) { rc = -ENETUNREACH; - goto err; + goto err_closed; } /* Discard packet (for test purposes) if applicable */ if ( ( rc = inject_fault ( NETDEV_DISCARD_RATE ) ) != 0 ) - goto err; + goto err_fault; + + /* Map for DMA, if required */ + if ( netdev->dma && ( ! dma_mapped ( &iobuf->map ) ) ) { + if ( ( rc = iob_map_tx ( iobuf, netdev->dma ) ) != 0 ) + goto err_map; + } /* Transmit packet */ if ( ( rc = netdev->op->transmit ( netdev, iobuf ) ) != 0 ) - goto err; + goto err_transmit; + + /* Clear in-progress flag */ + netdev->state &= ~NETDEV_TX_IN_PROGRESS; profile_stop ( &net_tx_profiler ); return 0; - err: + err_transmit: + err_map: + err_fault: + err_closed: + netdev->state &= ~NETDEV_TX_IN_PROGRESS; + err_busy: netdev_tx_complete_err ( netdev, iobuf, rc ); return rc; } @@ -340,6 +361,9 @@ int netdev_tx ( struct net_device *netdev, struct io_buffer *iobuf ) { * Failure to do this will cause the retransmitted packet to be * immediately redeferred (which will result in out-of-order * transmissions and other nastiness). + * + * I/O buffers that have been mapped for DMA will remain mapped while + * present in the deferred transmit queue. */ void netdev_tx_defer ( struct net_device *netdev, struct io_buffer *iobuf ) { @@ -365,6 +389,9 @@ void netdev_tx_defer ( struct net_device *netdev, struct io_buffer *iobuf ) { * * The packet is discarded and a TX error is recorded. This function * takes ownership of the I/O buffer. + * + * The I/O buffer will be automatically unmapped for DMA, if + * applicable. */ void netdev_tx_err ( struct net_device *netdev, struct io_buffer *iobuf, int rc ) { @@ -379,6 +406,10 @@ void netdev_tx_err ( struct net_device *netdev, netdev->name, iobuf, strerror ( rc ) ); } + /* Unmap I/O buffer, if required */ + if ( iobuf && dma_mapped ( &iobuf->map ) ) + iob_unmap ( iobuf ); + /* Discard packet */ free_iob ( iobuf ); } @@ -462,10 +493,13 @@ static void netdev_tx_flush ( struct net_device *netdev ) { * Add packet to receive queue * * @v netdev Network device - * @v iobuf I/O buffer, or NULL + * @v iobuf I/O buffer * * The packet is added to the network device's RX queue. This * function takes ownership of the I/O buffer. + * + * The I/O buffer will be automatically unmapped for DMA, if + * applicable. */ void netdev_rx ( struct net_device *netdev, struct io_buffer *iobuf ) { int rc; @@ -479,6 +513,10 @@ void netdev_rx ( struct net_device *netdev, struct io_buffer *iobuf ) { return; } + /* Unmap I/O buffer, if required */ + if ( dma_mapped ( &iobuf->map ) ) + iob_unmap ( iobuf ); + /* Enqueue packet */ list_add_tail ( &iobuf->list, &netdev->rx_queue ); @@ -497,6 +535,9 @@ void netdev_rx ( struct net_device *netdev, struct io_buffer *iobuf ) { * takes ownership of the I/O buffer. @c iobuf may be NULL if, for * example, the net device wishes to report an error due to being * unable to allocate an I/O buffer. + * + * The I/O buffer will be automatically unmapped for DMA, if + * applicable. 
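(Aside: the NETDEV_TX_IN_PROGRESS and NETDEV_POLL_IN_PROGRESS guards above are plain state-flag re-entrancy locks. A generic, self-contained sketch of the pattern, with illustrative names only:)

/* Generic re-entrancy guard sketch using a state flag */
#define EXAMPLE_BUSY 0x0001

struct example_dev {
	unsigned int state;
};

static int example_tx ( struct example_dev *dev ) {
	int rc;

	/* Refuse re-entrant calls rather than recursing */
	if ( dev->state & EXAMPLE_BUSY )
		return -1;
	dev->state |= EXAMPLE_BUSY;

	rc = 0;	/* ... the actual transmit work would go here ... */

	/* Always clear the flag, on success and on failure */
	dev->state &= ~EXAMPLE_BUSY;
	return rc;
}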
*/ void netdev_rx_err ( struct net_device *netdev, struct io_buffer *iobuf, int rc ) { @@ -504,6 +545,10 @@ void netdev_rx_err ( struct net_device *netdev, DBGC ( netdev, "NETDEV %s failed to receive %p: %s\n", netdev->name, iobuf, strerror ( rc ) ); + /* Unmap I/O buffer, if required */ + if ( iobuf && dma_mapped ( &iobuf->map ) ) + iob_unmap ( iobuf ); + /* Discard packet */ free_iob ( iobuf ); @@ -522,8 +567,18 @@ void netdev_rx_err ( struct net_device *netdev, */ void netdev_poll ( struct net_device *netdev ) { - if ( netdev_is_open ( netdev ) ) - netdev->op->poll ( netdev ); + /* Avoid calling poll() on unopened network devices */ + if ( ! netdev_is_open ( netdev ) ) + return; + + /* Guard against re-entry */ + if ( netdev->state & NETDEV_POLL_IN_PROGRESS ) + return; + + /* Poll device */ + netdev->state |= NETDEV_POLL_IN_PROGRESS; + netdev->op->poll ( netdev ); + netdev->state &= ~NETDEV_POLL_IN_PROGRESS; } /** @@ -1178,6 +1233,8 @@ static unsigned int net_discard ( void ) { /* Discard first deferred packet */ list_del ( &iobuf->list ); + if ( dma_mapped ( &iobuf->map ) ) + iob_unmap ( iobuf ); free_iob ( iobuf ); /* Report discard */ diff --git a/src/net/peerdisc.c b/src/net/peerdisc.c index 55e3f7fa7..d7e0d2989 100644 --- a/src/net/peerdisc.c +++ b/src/net/peerdisc.c @@ -73,6 +73,9 @@ static LIST_HEAD ( peerdisc_segments ); */ unsigned int peerdisc_timeout_secs = PEERDISC_DEFAULT_TIMEOUT_SECS; +/** Most recently discovered peer (for any block) */ +static char *peerdisc_recent; + /** Hosted cache server */ static char *peerhost; @@ -383,6 +386,7 @@ static int peerdisc_discovered ( struct peerdisc_segment *segment, struct peerdisc_peer *peer; struct peerdisc_client *peerdisc; struct peerdisc_client *tmp; + char *recent; /* Ignore duplicate peers */ list_for_each_entry ( peer, &segment->peers, list ) { @@ -403,6 +407,15 @@ static int peerdisc_discovered ( struct peerdisc_segment *segment, /* Add to end of list of peers */ list_add_tail ( &peer->list, &segment->peers ); + /* Record as most recently discovered peer */ + if ( location != peerdisc_recent ) { + recent = strdup ( location ); + if ( recent ) { + free ( peerdisc_recent ); + peerdisc_recent = recent; + } + } + /* Notify all clients */ list_for_each_entry_safe ( peerdisc, tmp, &segment->clients, list ) peerdisc->op->discovered ( peerdisc ); @@ -484,6 +497,16 @@ static struct peerdisc_segment * peerdisc_create ( const char *id ) { } else { + /* Add most recently discovered peer to list of peers + * + * This is a performance optimisation: we assume that + * the most recently discovered peer for any block has + * a high probability of also having a copy of the + * next block that we attempt to discover. 
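The peer discovery change caches the most recently discovered peer and updates the cache with a strdup-then-swap, so that an allocation failure leaves the previous cached value intact rather than losing it. A minimal, generic sketch of that update pattern (not iPXE's actual peerdisc API):

#include <stdlib.h>
#include <string.h>

/* Cached copy of the most recently seen value (NULL if none yet) */
static char *recent;

/* Update the cache.  On allocation failure the old value is kept, so
 * the optimisation simply degrades instead of discarding state.
 */
static void record_recent ( const char *value ) {
	char *copy;

	copy = strdup ( value );
	if ( ! copy )
		return;
	free ( recent );
	recent = copy;
}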
+ */ + if ( peerdisc_recent ) + peerdisc_discovered ( segment, peerdisc_recent ); + /* Start discovery timer */ start_timer_nodelay ( &segment->timer ); DBGC2 ( segment, "PEERDISC %p discovering %s\n", diff --git a/src/net/ping.c b/src/net/ping.c index 3f4fa5c11..f0729e159 100644 --- a/src/net/ping.c +++ b/src/net/ping.c @@ -259,17 +259,9 @@ static int ping_open ( struct interface *xfer, struct sockaddr *peer, return rc; } -/** Ping IPv4 socket opener */ -struct socket_opener ping_ipv4_socket_opener __socket_opener = { +/** Ping socket opener */ +struct socket_opener ping_socket_opener __socket_opener = { .semantics = PING_SOCK_ECHO, - .family = AF_INET, - .open = ping_open, -}; - -/** Ping IPv6 socket opener */ -struct socket_opener ping_ipv6_socket_opener __socket_opener = { - .semantics = PING_SOCK_ECHO, - .family = AF_INET6, .open = ping_open, }; diff --git a/src/net/tcp.c b/src/net/tcp.c index 6bba44282..2a98221f6 100644 --- a/src/net/tcp.c +++ b/src/net/tcp.c @@ -1743,17 +1743,9 @@ static struct interface_descriptor tcp_xfer_desc = *************************************************************************** */ -/** TCP IPv4 socket opener */ -struct socket_opener tcp_ipv4_socket_opener __socket_opener = { +/** TCP socket opener */ +struct socket_opener tcp_socket_opener __socket_opener = { .semantics = TCP_SOCK_STREAM, - .family = AF_INET, - .open = tcp_open, -}; - -/** TCP IPv6 socket opener */ -struct socket_opener tcp_ipv6_socket_opener __socket_opener = { - .semantics = TCP_SOCK_STREAM, - .family = AF_INET6, .open = tcp_open, }; diff --git a/src/net/tcp/httpconn.c b/src/net/tcp/httpconn.c index 5121ff6c2..538c4dcf6 100644 --- a/src/net/tcp/httpconn.c +++ b/src/net/tcp/httpconn.c @@ -32,6 +32,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include +#include #include #include #include @@ -63,7 +64,7 @@ static struct http_scheme * http_scheme ( struct uri *uri ) { /* Identify scheme */ for_each_table_entry ( scheme, HTTP_SCHEMES ) { - if ( strcmp ( uri->scheme, scheme->name ) == 0 ) + if ( strcasecmp ( uri->scheme, scheme->name ) == 0 ) return scheme; } @@ -236,7 +237,6 @@ int http_connect ( struct interface *xfer, struct uri *uri ) { struct http_connection *conn; struct http_scheme *scheme; struct sockaddr_tcpip server; - struct interface *socket; unsigned int port; int rc; @@ -296,15 +296,15 @@ int http_connect ( struct interface *xfer, struct uri *uri ) { /* Open socket */ memset ( &server, 0, sizeof ( server ) ); server.st_port = htons ( port ); - socket = &conn->socket; - if ( scheme->filter && - ( ( rc = scheme->filter ( socket, uri->host, &socket ) ) != 0 ) ) - goto err_filter; - if ( ( rc = xfer_open_named_socket ( socket, SOCK_STREAM, + if ( ( rc = xfer_open_named_socket ( &conn->socket, SOCK_STREAM, ( struct sockaddr * ) &server, uri->host, NULL ) ) != 0 ) goto err_open; + /* Add filter, if any */ + if ( scheme->filter && ( ( rc = scheme->filter ( conn ) ) != 0 ) ) + goto err_filter; + /* Attach to parent interface, mortalise self, and return */ intf_plug_plug ( &conn->xfer, xfer ); ref_put ( &conn->refcnt ); @@ -313,8 +313,8 @@ int http_connect ( struct interface *xfer, struct uri *uri ) { conn->scheme->name, conn->uri->host, port ); return 0; - err_open: err_filter: + err_open: DBGC2 ( conn, "HTTPCONN %p could not create %s://%s:%d: %s\n", conn, conn->scheme->name, conn->uri->host, port, strerror ( rc ) ); http_conn_close ( conn, rc ); diff --git a/src/net/tcp/httpcore.c b/src/net/tcp/httpcore.c index f755fb72d..01bb496b2 100644 --- a/src/net/tcp/httpcore.c +++ 
b/src/net/tcp/httpcore.c @@ -56,6 +56,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include #include /* Disambiguate the various error causes */ @@ -519,6 +520,18 @@ __weak int http_block_read_capacity ( struct http_transaction *http __unused, return -ENOTSUP; } +/** + * Describe as an EFI device path + * + * @v http HTTP transaction + * @ret path EFI device path, or NULL on error + */ +static EFI_DEVICE_PATH_PROTOCOL * +http_efi_describe ( struct http_transaction *http ) { + + return efi_uri_path ( http->uri ); +} + /** HTTP data transfer interface operations */ static struct interface_operation http_xfer_operations[] = { INTF_OP ( block_read, struct http_transaction *, http_block_read ), @@ -526,6 +539,8 @@ static struct interface_operation http_xfer_operations[] = { http_block_read_capacity ), INTF_OP ( xfer_window_changed, struct http_transaction *, http_step ), INTF_OP ( intf_close, struct http_transaction *, http_close ), + EFI_INTF_OP ( efi_describe, struct http_transaction *, + http_efi_describe ), }; /** HTTP data transfer interface descriptor */ diff --git a/src/net/tcp/https.c b/src/net/tcp/https.c index e91000322..85f1f124f 100644 --- a/src/net/tcp/https.c +++ b/src/net/tcp/https.c @@ -31,12 +31,24 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); */ #include +#include #include #include #include FEATURE ( FEATURE_PROTOCOL, "HTTPS", DHCP_EB_FEATURE_HTTPS, 1 ); +/** + * Add HTTPS filter + * + * @v conn HTTP connection + * @ret rc Return status code + */ +static int https_filter ( struct http_connection *conn ) { + + return add_tls ( &conn->socket, conn->uri->host, NULL, NULL ); +} + /** HTTPS URI opener */ struct uri_opener https_uri_opener __uri_opener = { .scheme = "https", @@ -47,5 +59,5 @@ struct uri_opener https_uri_opener __uri_opener = { struct http_scheme https_scheme __http_scheme = { .name = "https", .port = HTTPS_PORT, - .filter = add_tls, + .filter = https_filter, }; diff --git a/src/net/tcp/iscsi.c b/src/net/tcp/iscsi.c index f8379b285..e36d5619d 100644 --- a/src/net/tcp/iscsi.c +++ b/src/net/tcp/iscsi.c @@ -46,6 +46,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include #include /** @file @@ -980,18 +981,26 @@ static int iscsi_handle_chap_i_value ( struct iscsi_session *iscsi, */ static int iscsi_handle_chap_c_value ( struct iscsi_session *iscsi, const char *value ) { - uint8_t buf[ strlen ( value ) ]; /* Decoding never expands data */ + uint8_t *buf; unsigned int i; int len; int rc; + /* Allocate decoding buffer */ + len = strlen ( value ); /* Decoding never expands data */ + buf = malloc ( len ); + if ( ! 
buf ) { + rc = -ENOMEM; + goto err_alloc; + } + /* Process challenge */ - len = iscsi_large_binary_decode ( value, buf, sizeof ( buf ) ); + len = iscsi_large_binary_decode ( value, buf, len ); if ( len < 0 ) { rc = len; DBGC ( iscsi, "iSCSI %p invalid CHAP challenge \"%s\": %s\n", iscsi, value, strerror ( rc ) ); - return rc; + goto err_decode; } chap_update ( &iscsi->chap, buf, len ); @@ -1009,7 +1018,13 @@ static int iscsi_handle_chap_c_value ( struct iscsi_session *iscsi, } } - return 0; + /* Success */ + rc = 0; + + err_decode: + free ( buf ); + err_alloc: + return rc; } /** @@ -1050,7 +1065,7 @@ static int iscsi_handle_chap_n_value ( struct iscsi_session *iscsi, */ static int iscsi_handle_chap_r_value ( struct iscsi_session *iscsi, const char *value ) { - uint8_t buf[ strlen ( value ) ]; /* Decoding never expands data */ + uint8_t *buf; int len; int rc; @@ -1059,7 +1074,7 @@ static int iscsi_handle_chap_r_value ( struct iscsi_session *iscsi, if ( ( rc = chap_init ( &iscsi->chap, &md5_algorithm ) ) != 0 ) { DBGC ( iscsi, "iSCSI %p could not initialise CHAP: %s\n", iscsi, strerror ( rc ) ); - return rc; + goto err_chap_init; } chap_set_identifier ( &iscsi->chap, iscsi->chap_challenge[0] ); if ( iscsi->target_password ) { @@ -1070,31 +1085,47 @@ static int iscsi_handle_chap_r_value ( struct iscsi_session *iscsi, ( sizeof ( iscsi->chap_challenge ) - 1 ) ); chap_respond ( &iscsi->chap ); + /* Allocate decoding buffer */ + len = strlen ( value ); /* Decoding never expands data */ + buf = malloc ( len ); + if ( ! buf ) { + rc = -ENOMEM; + goto err_alloc; + } + /* Process response */ - len = iscsi_large_binary_decode ( value, buf, sizeof ( buf ) ); + len = iscsi_large_binary_decode ( value, buf, len ); if ( len < 0 ) { rc = len; DBGC ( iscsi, "iSCSI %p invalid CHAP response \"%s\": %s\n", iscsi, value, strerror ( rc ) ); - return rc; + goto err_decode; } /* Check CHAP response */ if ( len != ( int ) iscsi->chap.response_len ) { DBGC ( iscsi, "iSCSI %p invalid CHAP response length\n", iscsi ); - return -EPROTO_INVALID_CHAP_RESPONSE; + rc = -EPROTO_INVALID_CHAP_RESPONSE; + goto err_response_len; } if ( memcmp ( buf, iscsi->chap.response, len ) != 0 ) { DBGC ( iscsi, "iSCSI %p incorrect CHAP response \"%s\"\n", iscsi, value ); - return -EACCES_INCORRECT_TARGET_PASSWORD; + rc = -EACCES_INCORRECT_TARGET_PASSWORD; + goto err_response; } /* Mark session as authenticated */ iscsi->status |= ISCSI_STATUS_AUTH_REVERSE_OK; - return 0; + err_response: + err_response_len: + err_decode: + free ( buf ); + err_alloc: + err_chap_init: + return rc; } /** An iSCSI text string that we want to handle */ @@ -1833,6 +1864,7 @@ static struct interface_operation iscsi_control_op[] = { INTF_OP ( xfer_window, struct iscsi_session *, iscsi_scsi_window ), INTF_OP ( intf_close, struct iscsi_session *, iscsi_close ), INTF_OP ( acpi_describe, struct iscsi_session *, iscsi_describe ), + EFI_INTF_OP ( efi_describe, struct iscsi_session *, efi_iscsi_path ), }; /** iSCSI SCSI command-issuing interface descriptor */ @@ -1918,15 +1950,22 @@ const struct setting reverse_password_setting __setting ( SETTING_AUTH_EXTRA, */ static int iscsi_parse_root_path ( struct iscsi_session *iscsi, const char *root_path ) { - char rp_copy[ strlen ( root_path ) + 1 ]; + char *rp_copy; char *rp_comp[NUM_RP_COMPONENTS]; - char *rp = rp_copy; + char *rp; int skip = 0; int i = 0; int rc; + /* Create modifiable copy of root path */ + rp_copy = strdup ( root_path ); + if ( ! 
rp_copy ) { + rc = -ENOMEM; + goto err_strdup; + } + rp = rp_copy; + /* Split root path into component parts */ - strcpy ( rp_copy, root_path ); while ( 1 ) { rp_comp[i++] = rp; if ( i == NUM_RP_COMPONENTS ) @@ -1935,7 +1974,8 @@ static int iscsi_parse_root_path ( struct iscsi_session *iscsi, if ( ! *rp ) { DBGC ( iscsi, "iSCSI %p root path \"%s\" " "too short\n", iscsi, root_path ); - return -EINVAL_ROOT_PATH_TOO_SHORT; + rc = -EINVAL_ROOT_PATH_TOO_SHORT; + goto err_split; } else if ( *rp == '[' ) { skip = 1; } else if ( *rp == ']' ) { @@ -1947,21 +1987,31 @@ static int iscsi_parse_root_path ( struct iscsi_session *iscsi, /* Use root path components to configure iSCSI session */ iscsi->target_address = strdup ( rp_comp[RP_SERVERNAME] ); - if ( ! iscsi->target_address ) - return -ENOMEM; + if ( ! iscsi->target_address ) { + rc = -ENOMEM; + goto err_servername; + } iscsi->target_port = strtoul ( rp_comp[RP_PORT], NULL, 10 ); if ( ! iscsi->target_port ) iscsi->target_port = ISCSI_PORT; if ( ( rc = scsi_parse_lun ( rp_comp[RP_LUN], &iscsi->lun ) ) != 0 ) { DBGC ( iscsi, "iSCSI %p invalid LUN \"%s\"\n", iscsi, rp_comp[RP_LUN] ); - return rc; + goto err_lun; } iscsi->target_iqn = strdup ( rp_comp[RP_TARGETNAME] ); - if ( ! iscsi->target_iqn ) - return -ENOMEM; + if ( ! iscsi->target_iqn ) { + rc = -ENOMEM; + goto err_targetname; + } - return 0; + err_targetname: + err_lun: + err_servername: + err_split: + free ( rp_copy ); + err_strdup: + return rc; } /** diff --git a/src/net/tcp/syslogs.c b/src/net/tcp/syslogs.c index 0c07f86d5..f1f70d59e 100644 --- a/src/net/tcp/syslogs.c +++ b/src/net/tcp/syslogs.c @@ -62,9 +62,10 @@ static struct sockaddr_tcpip logserver = { * @v intf Interface * @v rc Reason for close */ -static void syslogs_close ( struct interface *intf __unused, int rc ) { +static void syslogs_close ( struct interface *intf, int rc ) { DBG ( "SYSLOGS console disconnected: %s\n", strerror ( rc ) ); + intf_restart ( intf, rc ); } /** @@ -208,7 +209,6 @@ const struct setting syslogs_setting __setting ( SETTING_MISC, syslogs ) = { static int apply_syslogs_settings ( void ) { static char *old_server; char *server; - struct interface *socket; int rc; /* Fetch log server */ @@ -234,33 +234,32 @@ static int apply_syslogs_settings ( void ) { rc = 0; goto out_no_server; } - - /* Add TLS filter */ - if ( ( rc = add_tls ( &syslogs, server, &socket ) ) != 0 ) { - DBG ( "SYSLOGS cannot create TLS filter: %s\n", - strerror ( rc ) ); - goto err_add_tls; - } + DBG ( "SYSLOGS using log server %s\n", server ); /* Connect to log server */ - if ( ( rc = xfer_open_named_socket ( socket, SOCK_STREAM, + if ( ( rc = xfer_open_named_socket ( &syslogs, SOCK_STREAM, (( struct sockaddr *) &logserver ), server, NULL ) ) != 0 ) { DBG ( "SYSLOGS cannot connect to log server: %s\n", strerror ( rc ) ); goto err_open_named_socket; } - DBG ( "SYSLOGS using log server %s\n", server ); + + /* Add TLS filter */ + if ( ( rc = add_tls ( &syslogs, server, NULL, NULL ) ) != 0 ) { + DBG ( "SYSLOGS cannot create TLS filter: %s\n", + strerror ( rc ) ); + goto err_add_tls; + } /* Record log server */ old_server = server; - server = NULL; - /* Success */ - rc = 0; + return 0; - err_open_named_socket: err_add_tls: + err_open_named_socket: + syslogs_close ( &syslogs, rc ); out_no_server: out_no_change: free ( server ); diff --git a/src/net/tls.c b/src/net/tls.c index 12045b01e..3c4144450 100644 --- a/src/net/tls.c +++ b/src/net/tls.c @@ -45,10 +45,12 @@ FILE_LICENCE ( GPL2_OR_LATER ); #include #include #include +#include #include 
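Several of the hunks above (the CHAP challenge and response handlers and the two root-path parsers) replace variable-length stack arrays sized by strlen() of externally supplied strings with heap allocations, released through a single chain of goto labels. A compact sketch of the pattern, with a made-up parse_value() standing in for the protocol-specific decoding:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical decoder standing in for iscsi_large_binary_decode():
 * this placeholder just copies the bytes and returns the length.
 */
static int parse_value ( const char *value, unsigned char *buf, size_t len ) {
	memcpy ( buf, value, len );
	return ( int ) len;
}

static int handle_value ( const char *value ) {
	unsigned char *buf;
	size_t len;
	int rc;

	/* Allocate the decoding buffer on the heap rather than as a
	 * variable-length array on the small, fixed-size stack.
	 */
	len = strlen ( value );
	buf = malloc ( len );
	if ( ! buf ) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	/* Decode value */
	if ( ( rc = parse_value ( value, buf, len ) ) < 0 )
		goto err_parse;

	/* ... use decoded data ... */
	rc = 0;

 err_parse:
	free ( buf );
 err_alloc:
	return rc;
}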
#include #include #include +#include /* Disambiguate the various error causes */ #define EINVAL_CHANGE_CIPHER __einfo_error ( EINFO_EINVAL_CHANGE_CIPHER ) @@ -244,6 +246,23 @@ static int tls_ready ( struct tls_connection *tls ) { ( ! is_pending ( &tls->server_negotiation ) ) ); } +/** + * Check for TLS version + * + * @v tls TLS connection + * @v version TLS version + * @ret at_least TLS connection is using at least the specified version + * + * Check that TLS connection uses at least the specified protocol + * version. Optimise down to a compile-time constant true result if + * this is already guaranteed by the minimum supported version check. + */ +static inline __attribute__ (( always_inline )) int +tls_version ( struct tls_connection *tls, unsigned int version ) { + return ( ( TLS_VERSION_MIN >= version ) || + ( tls->version >= version ) ); +} + /****************************************************************************** * * Hybrid MD5+SHA1 hash as used by TLSv1.1 and earlier @@ -331,7 +350,9 @@ static void free_tls_session ( struct refcnt *refcnt ) { /* Remove from list of sessions */ list_del ( &session->list ); - /* Free session ticket */ + /* Free dynamically-allocated resources */ + x509_root_put ( session->root ); + privkey_put ( session->key ); free ( session->ticket ); /* Free session */ @@ -360,8 +381,10 @@ static void free_tls ( struct refcnt *refcnt ) { list_del ( &iobuf->list ); free_iob ( iobuf ); } - x509_put ( tls->cert ); + x509_chain_put ( tls->certs ); x509_chain_put ( tls->chain ); + x509_root_put ( tls->root ); + privkey_put ( tls->key ); /* Drop reference to session */ assert ( list_empty ( &tls->list ) ); @@ -540,7 +563,7 @@ static void tls_prf ( struct tls_connection *tls, void *secret, va_start ( seeds, out_len ); - if ( tls->version >= TLS_VERSION_TLS_1_2 ) { + if ( tls_version ( tls, TLS_VERSION_TLS_1_2 ) ) { /* Use P_SHA256 for TLSv1.2 and later */ tls_p_hash_va ( tls, &sha256_algorithm, secret, secret_len, out, out_len, seeds ); @@ -1129,41 +1152,57 @@ static int tls_send_client_hello ( struct tls_connection *tls ) { * @ret rc Return status code */ static int tls_send_certificate ( struct tls_connection *tls ) { + struct { + tls24_t length; + uint8_t data[0]; + } __attribute__ (( packed )) *certificate; struct { uint32_t type_length; tls24_t length; - struct { - tls24_t length; - uint8_t data[ tls->cert->raw.len ]; - } __attribute__ (( packed )) certificates[1]; - } __attribute__ (( packed )) *certificate; + typeof ( *certificate ) certificates[0]; + } __attribute__ (( packed )) *certificates; + struct x509_link *link; + struct x509_certificate *cert; + size_t len; int rc; + /* Calculate length of client certificates */ + len = 0; + list_for_each_entry ( link, &tls->certs->links, list ) { + cert = link->cert; + len += ( sizeof ( *certificate ) + cert->raw.len ); + DBGC ( tls, "TLS %p sending client certificate %s\n", + tls, x509_name ( cert ) ); + } + /* Allocate storage for Certificate record (which may be too * large for the stack). */ - certificate = zalloc ( sizeof ( *certificate ) ); - if ( ! certificate ) + certificates = zalloc ( sizeof ( *certificates ) + len ); + if ( ! 
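The tls_version() helper introduced above is written so that, when the requested version is no newer than the compile-time minimum, the first comparison collapses to a constant and the per-version branches elsewhere in the file disappear from the generated code. A standalone illustration of the same trick, using hypothetical version constants rather than iPXE's real TLS definitions:

/* Protocol version numbers in TLS style: 0x0301 = 1.0, 0x0303 = 1.2 */
#define VERSION_TLS_1_0 0x0301
#define VERSION_TLS_1_2 0x0303

/* Hypothetical build-time minimum supported version */
#define VERSION_MIN VERSION_TLS_1_2

struct conn {
	unsigned int version;
};

/* Returns true if the connection uses at least "version".  When
 * VERSION_MIN already guarantees this, the first operand is a
 * compile-time constant and the whole call folds to 1.
 */
static inline __attribute__ (( always_inline )) int
conn_version ( struct conn *conn, unsigned int version ) {
	return ( ( VERSION_MIN >= version ) ||
		 ( conn->version >= version ) );
}

int uses_sha256 ( struct conn *conn ) {
	/* With VERSION_MIN set to 1.2 this is always true, so the
	 * runtime comparison and any "else" path can be dropped by
	 * the optimiser.
	 */
	return conn_version ( conn, VERSION_TLS_1_2 );
}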
certificates ) return -ENOMEM_CERTIFICATE; /* Populate record */ - certificate->type_length = + certificates->type_length = ( cpu_to_le32 ( TLS_CERTIFICATE ) | - htonl ( sizeof ( *certificate ) - - sizeof ( certificate->type_length ) ) ); - tls_set_uint24 ( &certificate->length, - sizeof ( certificate->certificates ) ); - tls_set_uint24 ( &certificate->certificates[0].length, - sizeof ( certificate->certificates[0].data ) ); - memcpy ( certificate->certificates[0].data, - tls->cert->raw.data, - sizeof ( certificate->certificates[0].data ) ); + htonl ( sizeof ( *certificates ) + len - + sizeof ( certificates->type_length ) ) ); + tls_set_uint24 ( &certificates->length, len ); + certificate = &certificates->certificates[0]; + list_for_each_entry ( link, &tls->certs->links, list ) { + cert = link->cert; + tls_set_uint24 ( &certificate->length, cert->raw.len ); + memcpy ( certificate->data, cert->raw.data, cert->raw.len ); + certificate = ( ( ( void * ) certificate->data ) + + cert->raw.len ); + } /* Transmit record */ - rc = tls_send_handshake ( tls, certificate, sizeof ( *certificate ) ); + rc = tls_send_handshake ( tls, certificates, + ( sizeof ( *certificates ) + len ) ); /* Free record */ - free ( certificate ); + free ( certificates ); return rc; } @@ -1220,8 +1259,9 @@ static int tls_send_client_key_exchange ( struct tls_connection *tls ) { */ static int tls_send_certificate_verify ( struct tls_connection *tls ) { struct digest_algorithm *digest = tls->handshake_digest; - struct x509_certificate *cert = tls->cert; + struct x509_certificate *cert = x509_first ( tls->certs ); struct pubkey_algorithm *pubkey = cert->signature_algorithm->pubkey; + struct asn1_cursor *key = privkey_cursor ( tls->key ); uint8_t digest_out[ digest->digestsize ]; uint8_t ctx[ pubkey->ctxsize ]; struct tls_signature_hash_algorithm *sig_hash = NULL; @@ -1231,15 +1271,14 @@ static int tls_send_certificate_verify ( struct tls_connection *tls ) { tls_verify_handshake ( tls, digest_out ); /* Initialise public-key algorithm */ - if ( ( rc = pubkey_init ( pubkey, ctx, private_key.data, - private_key.len ) ) != 0 ) { + if ( ( rc = pubkey_init ( pubkey, ctx, key->data, key->len ) ) != 0 ) { DBGC ( tls, "TLS %p could not initialise %s client private " "key: %s\n", tls, pubkey->name, strerror ( rc ) ); goto err_pubkey_init; } /* TLSv1.2 and later use explicit algorithm identifiers */ - if ( tls->version >= TLS_VERSION_TLS_1_2 ) { + if ( tls_version ( tls, TLS_VERSION_TLS_1_2 ) ) { sig_hash = tls_signature_hash_algorithm ( pubkey, digest ); if ( ! sig_hash ) { DBGC ( tls, "TLS %p could not identify (%s,%s) " @@ -1558,7 +1597,7 @@ static int tls_new_server_hello ( struct tls_connection *tls, /* Check and store protocol version */ version = ntohs ( hello_a->version ); - if ( version < TLS_VERSION_TLS_1_0 ) { + if ( version < TLS_VERSION_MIN ) { DBGC ( tls, "TLS %p does not support protocol version %d.%d\n", tls, ( version >> 8 ), ( version & 0xff ) ); return -ENOTSUP_VERSION; @@ -1576,7 +1615,7 @@ static int tls_new_server_hello ( struct tls_connection *tls, /* Use MD5+SHA1 digest algorithm for handshake verification * for versions earlier than TLSv1.2. */ - if ( tls->version < TLS_VERSION_TLS_1_2 ) { + if ( ! 
tls_version ( tls, TLS_VERSION_TLS_1_2 ) ) { tls->handshake_digest = &md5_sha1_algorithm; tls->handshake_ctx = tls->handshake_md5_sha1_ctx; } @@ -1827,26 +1866,57 @@ static int tls_new_certificate ( struct tls_connection *tls, static int tls_new_certificate_request ( struct tls_connection *tls, const void *data __unused, size_t len __unused ) { + struct x509_certificate *cert; + int rc; /* We can only send a single certificate, so there is no point * in parsing the Certificate Request. */ - /* Free any existing client certificate */ - x509_put ( tls->cert ); + /* Free any existing client certificate chain */ + x509_chain_put ( tls->certs ); + tls->certs = NULL; /* Determine client certificate to be sent */ - tls->cert = certstore_find_key ( &private_key ); - if ( ! tls->cert ) { + cert = certstore_find_key ( tls->key ); + if ( ! cert ) { DBGC ( tls, "TLS %p could not find certificate corresponding " "to private key\n", tls ); - return -EPERM_CLIENT_CERT; + rc = -EPERM_CLIENT_CERT; + goto err_find; } - x509_get ( tls->cert ); - DBGC ( tls, "TLS %p sending client certificate %s\n", - tls, x509_name ( tls->cert ) ); + x509_get ( cert ); + DBGC ( tls, "TLS %p selected client certificate %s\n", + tls, x509_name ( cert ) ); + + /* Create client certificate chain */ + tls->certs = x509_alloc_chain(); + if ( ! tls->certs ) { + rc = -ENOMEM; + goto err_alloc; + } + + /* Append client certificate to chain */ + if ( ( rc = x509_append ( tls->certs, cert ) ) != 0 ) + goto err_append; + + /* Append any relevant issuer certificates */ + if ( ( rc = x509_auto_append ( tls->certs, &certstore ) ) != 0 ) + goto err_auto_append; + + /* Drop local reference to client certificate */ + x509_put ( cert ); return 0; + + err_auto_append: + err_append: + x509_chain_put ( tls->certs ); + tls->certs = NULL; + err_alloc: + x509_put ( cert ); + err_find: + return rc; } /** @@ -1873,7 +1943,8 @@ static int tls_new_server_hello_done ( struct tls_connection *tls, } /* Begin certificate validation */ - if ( ( rc = create_validator ( &tls->validator, tls->chain ) ) != 0 ) { + if ( ( rc = create_validator ( &tls->validator, tls->chain, + tls->root ) ) != 0 ) { DBGC ( tls, "TLS %p could not start certificate validation: " "%s\n", tls, strerror ( rc ) ); return rc; @@ -2258,7 +2329,7 @@ static void * tls_assemble_block ( struct tls_connection *tls, void *padding; /* TLSv1.1 and later use an explicit IV */ - iv_len = ( ( tls->version >= TLS_VERSION_TLS_1_1 ) ? blocksize : 0 ); + iv_len = ( tls_version ( tls, TLS_VERSION_TLS_1_1 ) ? blocksize : 0 ); /* Calculate block-ciphered struct length */ padding_len = ( ( blocksize - 1 ) & -( iv_len + len + mac_len + 1 ) ); @@ -2420,7 +2491,7 @@ static int tls_split_block ( struct tls_connection *tls, /* TLSv1.1 and later use an explicit IV */ iobuf = list_first_entry ( rx_data, struct io_buffer, list ); - iv_len = ( ( tls->version >= TLS_VERSION_TLS_1_1 ) ? + iv_len = ( tls_version ( tls, TLS_VERSION_TLS_1_1 ) ? 
tls->rx_cipherspec.suite->cipher->blocksize : 0 ); if ( iob_len ( iobuf ) < iv_len ) { DBGC ( tls, "TLS %p received underlength IV\n", tls ); @@ -2862,7 +2933,7 @@ static void tls_validator_done ( struct tls_connection *tls, int rc ) { tls->tx_pending |= ( TLS_TX_CLIENT_KEY_EXCHANGE | TLS_TX_CHANGE_CIPHER | TLS_TX_FINISHED ); - if ( tls->cert ) { + if ( tls->certs ) { tls->tx_pending |= ( TLS_TX_CERTIFICATE | TLS_TX_CERTIFICATE_VERIFY ); } @@ -3030,7 +3101,9 @@ static int tls_session ( struct tls_connection *tls, const char *name ) { /* Find existing matching session, if any */ list_for_each_entry ( session, &tls_sessions, list ) { - if ( strcmp ( name, session->name ) == 0 ) { + if ( ( strcmp ( name, session->name ) == 0 ) && + ( tls->root == session->root ) && + ( tls->key == session->key ) ) { ref_get ( &session->refcnt ); tls->session = session; DBGC ( tls, "TLS %p joining session %s\n", tls, name ); @@ -3049,6 +3122,8 @@ static int tls_session ( struct tls_connection *tls, const char *name ) { name_copy = ( ( ( void * ) session ) + sizeof ( *session ) ); strcpy ( name_copy, name ); session->name = name_copy; + session->root = x509_root_get ( tls->root ); + session->key = privkey_get ( tls->key ); INIT_LIST_HEAD ( &session->conn ); list_add ( &session->list, &tls_sessions ); @@ -3070,8 +3145,17 @@ static int tls_session ( struct tls_connection *tls, const char *name ) { ****************************************************************************** */ +/** + * Add TLS on an interface + * + * @v xfer Data transfer interface + * @v name Host name + * @v root Root of trust (or NULL to use default) + * @v key Private key (or NULL to use default) + * @ret rc Return status code + */ int add_tls ( struct interface *xfer, const char *name, - struct interface **next ) { + struct x509_root *root, struct private_key *key ) { struct tls_connection *tls; int rc; @@ -3089,6 +3173,8 @@ int add_tls ( struct interface *xfer, const char *name, intf_init ( &tls->validator, &tls_validator_desc, &tls->refcnt ); process_init_stopped ( &tls->process, &tls_process_desc, &tls->refcnt ); + tls->key = privkey_get ( key ? key : &private_key ); + tls->root = x509_root_get ( root ? 
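The add_tls() interface change here is the pivot for several earlier hunks: it now takes an explicit root of trust and client private key (NULL selects the built-in defaults), and, as the body just below shows, it splices its plaintext/ciphertext interfaces into the caller's already-connected interface with intf_insert() instead of handing back a "next" interface. Restated from this patch for reference (types and headers are iPXE's own):

/* New prototype: a NULL root or key falls back to the built-in
 * root_certificates list and the default client private_key.
 */
int add_tls ( struct interface *xfer, const char *name,
	      struct x509_root *root, struct private_key *key );

/* Caller shape introduced by this patch (the HTTPS scheme filter and
 * the syslogs console follow it): open the underlying socket first,
 * then layer TLS over it in place.
 */
static int https_filter ( struct http_connection *conn ) {
	return add_tls ( &conn->socket, conn->uri->host, NULL, NULL );
}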
root : &root_certificates ); tls->version = TLS_VERSION_TLS_1_2; tls_clear_cipher ( tls, &tls->tx_cipherspec ); tls_clear_cipher ( tls, &tls->tx_cipherspec_pending ); @@ -3115,8 +3201,7 @@ int add_tls ( struct interface *xfer, const char *name, tls_restart ( tls ); /* Attach to parent interface, mortalise self, and return */ - intf_plug_plug ( &tls->plainstream, xfer ); - *next = &tls->cipherstream; + intf_insert ( xfer, &tls->plainstream, &tls->cipherstream ); ref_put ( &tls->refcnt ); return 0; diff --git a/src/net/udp.c b/src/net/udp.c index 1fbc12d48..2c0b343dc 100644 --- a/src/net/udp.c +++ b/src/net/udp.c @@ -396,17 +396,9 @@ static struct interface_descriptor udp_xfer_desc = *************************************************************************** */ -/** UDP IPv4 socket opener */ -struct socket_opener udp_ipv4_socket_opener __socket_opener = { +/** UDP socket opener */ +struct socket_opener udp_socket_opener __socket_opener = { .semantics = UDP_SOCK_DGRAM, - .family = AF_INET, - .open = udp_open, -}; - -/** UDP IPv6 socket opener */ -struct socket_opener udp_ipv6_socket_opener __socket_opener = { - .semantics = UDP_SOCK_DGRAM, - .family = AF_INET6, .open = udp_open, }; diff --git a/src/net/udp/dhcp.c b/src/net/udp/dhcp.c index 3a3666c9a..a335a778a 100644 --- a/src/net/udp/dhcp.c +++ b/src/net/udp/dhcp.c @@ -443,6 +443,26 @@ static void dhcp_discovery_rx ( struct dhcp_session *dhcp, dhcp_set_state ( dhcp, &dhcp_state_request ); } +/** + * Defer DHCP discovery + * + * @v dhcp DHCP session + */ +static void dhcp_defer ( struct dhcp_session *dhcp ) { + + /* Do nothing if we have reached the deferral limit */ + if ( dhcp->count > DHCP_DISC_MAX_DEFERRALS ) + return; + + /* Return to discovery state */ + DBGC ( dhcp, "DHCP %p deferring discovery\n", dhcp ); + dhcp_set_state ( dhcp, &dhcp_state_discover ); + + /* Delay first DHCPDISCOVER */ + start_timer_fixed ( &dhcp->timer, + ( DHCP_DISC_START_TIMEOUT_SEC * TICKS_PER_SEC ) ); +} + /** * Handle timer expiry during DHCP discovery * @@ -451,17 +471,6 @@ static void dhcp_discovery_rx ( struct dhcp_session *dhcp, static void dhcp_discovery_expired ( struct dhcp_session *dhcp ) { unsigned long elapsed = ( currticks() - dhcp->start ); - /* If link is blocked, defer DHCP discovery (and reset timeout) */ - if ( netdev_link_blocked ( dhcp->netdev ) && - ( dhcp->count <= DHCP_DISC_MAX_DEFERRALS ) ) { - DBGC ( dhcp, "DHCP %p deferring discovery\n", dhcp ); - dhcp->start = currticks(); - start_timer_fixed ( &dhcp->timer, - ( DHCP_DISC_START_TIMEOUT_SEC * - TICKS_PER_SEC ) ); - return; - } - /* Give up waiting for ProxyDHCP before we reach the failure point */ if ( dhcp->offer.s_addr && ( elapsed > DHCP_DISC_PROXY_TIMEOUT_SEC * TICKS_PER_SEC ) ) { @@ -469,8 +478,12 @@ static void dhcp_discovery_expired ( struct dhcp_session *dhcp ) { return; } - /* Otherwise, retransmit current packet */ + /* Retransmit current packet */ dhcp_tx ( dhcp ); + + /* If link is blocked, defer DHCP discovery timeout */ + if ( netdev_link_blocked ( dhcp->netdev ) ) + dhcp_defer ( dhcp ); } /** DHCP discovery state operations */ @@ -554,9 +567,17 @@ static void dhcp_request_rx ( struct dhcp_session *dhcp, DBGC ( dhcp, " for %s", inet_ntoa ( ip ) ); DBGC ( dhcp, "\n" ); - /* Filter out unacceptable responses */ + /* Filter out invalid port */ if ( peer->sin_port != htons ( BOOTPS_PORT ) ) return; + + /* Handle DHCPNAK */ + if ( msgtype == DHCPNAK ) { + dhcp_defer ( dhcp ); + return; + } + + /* Filter out unacceptable responses */ if ( msgtype /* BOOTP */ && ( msgtype != 
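dhcp_defer() above and the NDP change earlier in this patch implement the same idea: retransmit as normal, but if the link is currently reported as blocked (for example by a Spanning Tree port that is not yet forwarding), rearm the timer with a short fixed delay and count deferrals so the blocked period cannot extend discovery forever. A compact sketch of that shared control flow, with hypothetical constants and helper stubs in place of iPXE's retry_timer and netdev APIs:

#include <stdbool.h>

/* Hypothetical stand-ins for DHCP_DISC_MAX_DEFERRALS and the fixed
 * retry interval used while the link is blocked.
 */
#define MAX_DEFERRALS  180
#define BLOCK_DELAY_MS 1000

struct discovery {
	unsigned int deferrals;
};

/* Stubs standing in for netdev_link_blocked(), start_timer_fixed()
 * and the protocol's retransmission.
 */
static bool link_blocked ( void ) { return true; }
static void restart_timer_fixed ( unsigned int ms ) { ( void ) ms; }
static void retransmit ( struct discovery *d ) { ( void ) d; }

/* Timer expiry: always retransmit, but while the link is blocked use
 * a short fixed delay so the blocked period does not eat into the
 * normal back-off budget.  The counter bounds how long to defer.
 */
static void discovery_expired ( struct discovery *d ) {
	retransmit ( d );
	if ( link_blocked() && ( d->deferrals++ <= MAX_DEFERRALS ) )
		restart_timer_fixed ( BLOCK_DELAY_MS );
}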
DHCPACK ) ) return; if ( server_id.s_addr != dhcp->server.s_addr ) diff --git a/src/net/udp/dns.c b/src/net/udp/dns.c index f412f7109..e5579174e 100644 --- a/src/net/udp/dns.c +++ b/src/net/udp/dns.c @@ -63,18 +63,33 @@ FEATURE ( FEATURE_PROTOCOL, "DNS", DHCP_EB_FEATURE_DNS, 1 ); #define EINFO_ENXIO_NO_NAMESERVER \ __einfo_uniqify ( EINFO_ENXIO, 0x02, "No DNS servers available" ) -/** The DNS server */ -static union { - struct sockaddr sa; - struct sockaddr_tcpip st; - struct sockaddr_in sin; - struct sockaddr_in6 sin6; -} nameserver = { - .st = { - .st_port = htons ( DNS_PORT ), - }, +/** A DNS server list */ +struct dns_server { + /** Server list */ + union { + /** IPv4 addresses */ + struct in_addr *in; + /** IPv6 addresses */ + struct in6_addr *in6; + /** Raw data */ + void *data; + }; + /** Number of servers */ + unsigned int count; }; +/** IPv4 DNS server list */ +static struct dns_server dns4; + +/** IPv6 DNS server list */ +static struct dns_server dns6; + +/** Total number of DNS servers */ +static unsigned int dns_count; + +/** Current DNS server index */ +static unsigned int dns_index; + /** The DNS search list */ static struct dns_name dns_search; @@ -555,6 +570,9 @@ static int dns_question ( struct dns_request *dns ) { /* Restore name */ dns->name.offset = offsetof ( typeof ( dns->buf ), name ); + /* Reset query ID */ + dns->buf.query.id = 0; + DBGC2 ( dns, "DNS %p question is %s type %s\n", dns, dns_name ( &dns->name ), dns_type ( dns->question->qtype ) ); @@ -569,24 +587,54 @@ static int dns_question ( struct dns_request *dns ) { */ static int dns_send_packet ( struct dns_request *dns ) { struct dns_header *query = &dns->buf.query; + union { + struct sockaddr sa; + struct sockaddr_tcpip st; + struct sockaddr_in sin; + struct sockaddr_in6 sin6; + } nameserver; + struct xfer_metadata meta; + unsigned int index; /* Start retransmission timer */ start_timer ( &dns->timer ); - /* Generate query identifier */ - query->id = random(); + /* Construct DNS server address */ + memset ( &nameserver, 0, sizeof ( nameserver ) ); + nameserver.st.st_port = htons ( DNS_PORT ); + if ( ! dns_count ) { + DBGC ( dns, "DNS %p lost DNS servers mid query\n", dns ); + return -EINVAL; + } + index = ( dns_index % dns_count ); + if ( index < dns6.count ) { + nameserver.sin6.sin6_family = AF_INET6; + memcpy ( &nameserver.sin6.sin6_addr, &dns6.in6[index], + sizeof ( nameserver.sin6.sin6_addr ) ); + } else { + nameserver.sin.sin_family = AF_INET; + nameserver.sin.sin_addr = dns4.in[index - dns6.count]; + } + + /* Construct metadata */ + memset ( &meta, 0, sizeof ( meta ) ); + meta.dest = &nameserver.sa; + + /* Generate query identifier if applicable */ + if ( ! 
query->id ) + query->id = random(); /* Send query */ - DBGC ( dns, "DNS %p sending query ID %#04x for %s type %s\n", dns, - ntohs ( query->id ), dns_name ( &dns->name ), - dns_type ( dns->question->qtype ) ); + DBGC ( dns, "DNS %p sending %s query ID %#04x for %s type %s\n", dns, + sock_ntoa ( &nameserver.sa ), ntohs ( query->id ), + dns_name ( &dns->name ), dns_type ( dns->question->qtype ) ); /* Send the data */ - return xfer_deliver_raw ( &dns->socket, query, dns->len ); + return xfer_deliver_raw_meta ( &dns->socket, query, dns->len, &meta ); } /** - * Handle DNS retransmission timer expiry + * Handle DNS (re)transmission timer expiry * * @v timer Retry timer * @v fail Failure indicator @@ -595,11 +643,18 @@ static void dns_timer_expired ( struct retry_timer *timer, int fail ) { struct dns_request *dns = container_of ( timer, struct dns_request, timer ); + /* Terminate DNS request on failure */ if ( fail ) { dns_done ( dns, -ETIMEDOUT ); - } else { - dns_send_packet ( dns ); + return; } + + /* Move to next DNS server if this is a retransmission */ + if ( dns->buf.query.id ) + dns_index++; + + /* Send DNS query */ + dns_send_packet ( dns ); } /** @@ -927,7 +982,7 @@ static int dns_resolv ( struct interface *resolv, int rc; /* Fail immediately if no DNS servers */ - if ( ! nameserver.sa.sa_family ) { + if ( dns_count == 0 ) { DBG ( "DNS not attempting to resolve \"%s\": " "no DNS servers\n", name ); rc = -ENXIO_NO_NAMESERVER; @@ -953,17 +1008,8 @@ static int dns_resolv ( struct interface *resolv, memcpy ( dns->search.data, dns_search.data, search_len ); /* Determine initial query type */ - switch ( nameserver.sa.sa_family ) { - case AF_INET: - dns->qtype = htons ( DNS_TYPE_A ); - break; - case AF_INET6: - dns->qtype = htons ( DNS_TYPE_AAAA ); - break; - default: - rc = -ENOTSUP; - goto err_type; - } + dns->qtype = ( ( dns6.count != 0 ) ? 
+ htons ( DNS_TYPE_AAAA ) : htons ( DNS_TYPE_A ) ); /* Construct query */ query = &dns->buf.query; @@ -984,7 +1030,7 @@ static int dns_resolv ( struct interface *resolv, /* Open UDP connection */ if ( ( rc = xfer_open_socket ( &dns->socket, SOCK_DGRAM, - &nameserver.sa, NULL ) ) != 0 ) { + NULL, NULL ) ) != 0 ) { DBGC ( dns, "DNS %p could not open socket: %s\n", dns, strerror ( rc ) ); goto err_open_socket; @@ -1001,7 +1047,6 @@ static int dns_resolv ( struct interface *resolv, err_open_socket: err_question: err_encode: - err_type: ref_put ( &dns->refcnt ); err_alloc_dns: err_no_nameserver: @@ -1096,6 +1141,31 @@ const struct setting dnssl_setting __setting ( SETTING_IP_EXTRA, dnssl ) = { .type = &setting_type_dnssl, }; +/** + * Apply DNS server addresses + * + */ +static void apply_dns_servers ( void ) { + int len; + + /* Free existing server addresses */ + free ( dns4.data ); + free ( dns6.data ); + dns4.data = NULL; + dns6.data = NULL; + dns4.count = 0; + dns6.count = 0; + + /* Fetch DNS server addresses */ + len = fetch_raw_setting_copy ( NULL, &dns_setting, &dns4.data ); + if ( len >= 0 ) + dns4.count = ( len / sizeof ( dns4.in[0] ) ); + len = fetch_raw_setting_copy ( NULL, &dns6_setting, &dns6.data ); + if ( len >= 0 ) + dns6.count = ( len / sizeof ( dns6.in6[0] ) ); + dns_count = ( dns4.count + dns6.count ); +} + /** * Apply DNS search list * @@ -1109,8 +1179,7 @@ static void apply_dns_search ( void ) { memset ( &dns_search, 0, sizeof ( dns_search ) ); /* Fetch DNS search list */ - len = fetch_setting_copy ( NULL, &dnssl_setting, NULL, NULL, - &dns_search.data ); + len = fetch_raw_setting_copy ( NULL, &dnssl_setting, &dns_search.data ); if ( len >= 0 ) { dns_search.len = len; return; @@ -1138,37 +1207,49 @@ static void apply_dns_search ( void ) { * @ret rc Return status code */ static int apply_dns_settings ( void ) { + void *dbgcol = &dns_count; /* Fetch DNS server address */ - nameserver.sa.sa_family = 0; - if ( fetch_ipv6_setting ( NULL, &dns6_setting, - &nameserver.sin6.sin6_addr ) >= 0 ) { - nameserver.sin6.sin6_family = AF_INET6; - } else if ( fetch_ipv4_setting ( NULL, &dns_setting, - &nameserver.sin.sin_addr ) >= 0 ) { - nameserver.sin.sin_family = AF_INET; - } - if ( nameserver.sa.sa_family ) { - DBG ( "DNS using nameserver %s\n", - sock_ntoa ( &nameserver.sa ) ); + apply_dns_servers(); + if ( DBG_EXTRA && ( dns_count != 0 ) ) { + union { + struct sockaddr sa; + struct sockaddr_in sin; + struct sockaddr_in6 sin6; + } u; + unsigned int i; + + DBGC2 ( dbgcol, "DNS servers:" ); + for ( i = 0 ; i < dns6.count ; i++ ) { + u.sin6.sin6_family = AF_INET6; + memcpy ( &u.sin6.sin6_addr, &dns6.in6[i], + sizeof ( u.sin6.sin6_addr ) ); + DBGC2 ( dbgcol, " %s", sock_ntoa ( &u.sa ) ); + } + for ( i = 0 ; i < dns4.count ; i++ ) { + u.sin.sin_family = AF_INET; + u.sin.sin_addr = dns4.in[i]; + DBGC2 ( dbgcol, " %s", sock_ntoa ( &u.sa ) ); + } + DBGC2 ( dbgcol, "\n" ); } /* Fetch DNS search list */ apply_dns_search(); - if ( DBG_LOG && ( dns_search.len != 0 ) ) { + if ( DBG_EXTRA && ( dns_search.len != 0 ) ) { struct dns_name name; int offset; - DBG ( "DNS search list:" ); + DBGC2 ( dbgcol, "DNS search list:" ); memcpy ( &name, &dns_search, sizeof ( name ) ); while ( name.offset != name.len ) { - DBG ( " %s", dns_name ( &name ) ); + DBGC2 ( dbgcol, " %s", dns_name ( &name ) ); offset = dns_skip_search ( &name ); if ( offset < 0 ) break; name.offset = offset; } - DBG ( "\n" ); + DBGC2 ( dbgcol, "\n" ); } return 0; diff --git a/src/net/udp/slam.c b/src/net/udp/slam.c index c165b4fb9..47f60080b 
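The reworked DNS code keeps separate IPv6 and IPv4 server arrays, treats them as one combined list with the IPv6 entries first, and advances a global index only on retransmission, so an unresponsive server is skipped on the next attempt. The index-to-server mapping is simple modular arithmetic; a small self-contained sketch with hypothetical addresses:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical combined server list: IPv6 entries are tried before
 * IPv4 entries, mirroring the ordering in dns_send_packet().
 */
static const char *dns6_servers[] = { "2001:db8::1" };
static const char *dns4_servers[] = { "192.0.2.53", "192.0.2.54" };
#define COUNT( array ) ( sizeof ( array ) / sizeof ( array[0] ) )

static unsigned int dns_index;

/* Pick the server for the current attempt */
static const char * dns_current ( void ) {
	unsigned int count =
		( COUNT ( dns6_servers ) + COUNT ( dns4_servers ) );
	unsigned int index = ( dns_index % count );

	return ( ( index < COUNT ( dns6_servers ) ) ?
		 dns6_servers[index] :
		 dns4_servers[index - COUNT ( dns6_servers )] );
}

int main ( void ) {
	/* First attempt uses the IPv6 server; each retransmission
	 * advances the index and rotates to the next server.
	 */
	printf ( "%s\n", dns_current() );
	dns_index++;			/* retransmission */
	printf ( "%s\n", dns_current() );
	dns_index++;
	printf ( "%s\n", dns_current() );
	return 0;
}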
100644 --- a/src/net/udp/slam.c +++ b/src/net/udp/slam.c @@ -655,35 +655,49 @@ static struct interface_descriptor slam_xfer_desc = */ static int slam_parse_multicast_address ( struct slam_request *slam, const char *path, - struct sockaddr_in *address ) { - char path_dup[ strlen ( path ) /* no +1 */ ]; + struct sockaddr_tcpip *address ) { + char *path_dup; char *sep; char *end; + int rc; /* Create temporary copy of path, minus the leading '/' */ assert ( *path == '/' ); - memcpy ( path_dup, ( path + 1 ) , sizeof ( path_dup ) ); + path_dup = strdup ( path + 1 ); + if ( ! path_dup ) { + rc = -ENOMEM; + goto err_strdup; + } /* Parse port, if present */ sep = strchr ( path_dup, ':' ); if ( sep ) { *(sep++) = '\0'; - address->sin_port = htons ( strtoul ( sep, &end, 0 ) ); + address->st_port = htons ( strtoul ( sep, &end, 0 ) ); if ( *end != '\0' ) { DBGC ( slam, "SLAM %p invalid multicast port " "\"%s\"\n", slam, sep ); - return -EINVAL; + rc = -EINVAL; + goto err_port; } } /* Parse address */ - if ( inet_aton ( path_dup, &address->sin_addr ) == 0 ) { + if ( sock_aton ( path_dup, ( ( struct sockaddr * ) address ) ) == 0 ) { DBGC ( slam, "SLAM %p invalid multicast address \"%s\"\n", slam, path_dup ); - return -EINVAL; + rc = -EINVAL; + goto err_addr; } - return 0; + /* Success */ + rc = 0; + + err_addr: + err_port: + free ( path_dup ); + err_strdup: + return rc; } /** @@ -701,7 +715,7 @@ static int slam_open ( struct interface *xfer, struct uri *uri ) { }; struct slam_request *slam; struct sockaddr_tcpip server; - struct sockaddr_in multicast; + struct sockaddr_tcpip multicast; int rc; /* Sanity checks */ diff --git a/src/net/udp/tftp.c b/src/net/udp/tftp.c index 6ce27497c..3073e682f 100644 --- a/src/net/udp/tftp.c +++ b/src/net/udp/tftp.c @@ -43,6 +43,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include #include /** @file @@ -158,6 +159,14 @@ enum { /** Maximum number of MTFTP open requests before falling back to TFTP */ #define MTFTP_MAX_TIMEOUTS 3 +/** Client profiler */ +static struct profiler tftp_client_profiler __profiler = + { .name = "tftp.client" }; + +/** Server profiler */ +static struct profiler tftp_server_profiler __profiler = + { .name = "tftp.server" }; + /** * Free TFTP request * @@ -545,8 +554,7 @@ static void tftp_timer_expired ( struct retry_timer *timer, int fail ) { * @v value Option value * @ret rc Return status code */ -static int tftp_process_blksize ( struct tftp_request *tftp, - const char *value ) { +static int tftp_process_blksize ( struct tftp_request *tftp, char *value ) { char *end; tftp->blksize = strtoul ( value, &end, 10 ); @@ -567,8 +575,7 @@ static int tftp_process_blksize ( struct tftp_request *tftp, * @v value Option value * @ret rc Return status code */ -static int tftp_process_tsize ( struct tftp_request *tftp, - const char *value ) { +static int tftp_process_tsize ( struct tftp_request *tftp, char *value ) { char *end; tftp->tsize = strtoul ( value, &end, 10 ); @@ -589,13 +596,11 @@ static int tftp_process_tsize ( struct tftp_request *tftp, * @v value Option value * @ret rc Return status code */ -static int tftp_process_multicast ( struct tftp_request *tftp, - const char *value ) { +static int tftp_process_multicast ( struct tftp_request *tftp, char *value ) { union { struct sockaddr sa; struct sockaddr_in sin; } socket; - char buf[ strlen ( value ) + 1 ]; char *addr; char *port; char *port_end; @@ -604,8 +609,7 @@ static int tftp_process_multicast ( struct tftp_request *tftp, int rc; /* Split value into "addr,port,mc" fields 
*/ - memcpy ( buf, value, sizeof ( buf ) ); - addr = buf; + addr = value; port = strchr ( addr, ',' ); if ( ! port ) { DBGC ( tftp, "TFTP %p multicast missing port,mc\n", tftp ); @@ -662,7 +666,7 @@ struct tftp_option { * @v value Option value * @ret rc Return status code */ - int ( * process ) ( struct tftp_request *tftp, const char *value ); + int ( * process ) ( struct tftp_request *tftp, char *value ); }; /** Recognised TFTP options */ @@ -682,7 +686,7 @@ static struct tftp_option tftp_options[] = { * @ret rc Return status code */ static int tftp_process_option ( struct tftp_request *tftp, - const char *name, const char *value ) { + const char *name, char *value ) { struct tftp_option *option; for ( option = tftp_options ; option->name ; option++ ) { @@ -807,6 +811,10 @@ static int tftp_rx_data ( struct tftp_request *tftp, } block += ( ntohs ( data->block ) - 1 ); + /* Stop profiling server turnaround if applicable */ + if ( block ) + profile_stop ( &tftp_server_profiler ); + /* Extract data */ offset = ( block * tftp->blksize ); iob_pull ( iobuf, sizeof ( *data ) ); @@ -839,6 +847,12 @@ static int tftp_rx_data ( struct tftp_request *tftp, /* Acknowledge block */ tftp_send_packet ( tftp ); + /* Stop profiling client turnaround */ + profile_stop ( &tftp_client_profiler ); + + /* Start profiling server turnaround */ + profile_start ( &tftp_server_profiler ); + /* If all blocks have been received, finish. */ if ( bitmap_full ( &tftp->bitmap ) ) tftp_done ( tftp, 0 ); @@ -911,7 +925,10 @@ static int tftp_rx ( struct tftp_request *tftp, struct tftp_common *common = iobuf->data; size_t len = iob_len ( iobuf ); int rc = -EINVAL; - + + /* Start profiling client turnaround */ + profile_start ( &tftp_client_profiler ); + /* Sanity checks */ if ( len < sizeof ( *common ) ) { DBGC ( tftp, "TFTP %p received underlength packet length " diff --git a/src/net/validator.c b/src/net/validator.c index f6b03ff41..693d4464b 100644 --- a/src/net/validator.c +++ b/src/net/validator.c @@ -73,6 +73,8 @@ struct validator { /** Process */ struct process process; + /** Root of trust (or NULL to use default) */ + struct x509_root *root; /** X.509 certificate chain */ struct x509_chain *chain; /** OCSP check */ @@ -114,6 +116,7 @@ static void validator_free ( struct refcnt *refcnt ) { DBGC2 ( validator, "VALIDATOR %p \"%s\" freed\n", validator, validator_name ( validator ) ); + x509_root_put ( validator->root ); x509_chain_put ( validator->chain ); ocsp_put ( validator->ocsp ); xferbuf_free ( &validator->buffer ); @@ -554,7 +557,7 @@ static void validator_step ( struct validator *validator ) { */ now = time ( NULL ); if ( ( rc = x509_validate_chain ( validator->chain, now, NULL, - NULL ) ) == 0 ) { + validator->root ) ) == 0 ) { DBGC ( validator, "VALIDATOR %p \"%s\" validated\n", validator, validator_name ( validator ) ); validator_finished ( validator, 0 ); @@ -569,7 +572,7 @@ static void validator_step ( struct validator *validator ) { issuer = link->cert; if ( ! cert ) continue; - if ( ! x509_is_valid ( issuer ) ) + if ( ! x509_is_valid ( issuer, validator->root ) ) continue; /* The issuer is valid, but this certificate is not * yet valid. If OCSP is applicable, start it. 
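The validator changes thread an explicit root of trust through validation instead of always using the global list, with NULL still meaning "use the default", and a certificate's validity becomes relative to the root it was checked against. A minimal sketch of that convention with generic names, not the X.509 code itself:

#include <stddef.h>
#include <stdbool.h>

struct trust_root {
	const char *name;
};

/* Built-in default trust anchors */
static struct trust_root default_root = { .name = "built-in" };

/* A certificate remembers which root it was validated against, so
 * "is valid" is a question asked relative to a particular root
 * rather than a single global flag.
 */
struct certificate {
	struct trust_root *validated_against;	/* NULL if not validated */
};

static bool cert_is_valid ( struct certificate *cert,
			    struct trust_root *root ) {
	if ( ! root )
		root = &default_root;	/* NULL means "use the default" */
	return ( cert->validated_against == root );
}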
@@ -621,9 +624,11 @@ static struct process_descriptor validator_process_desc = * * @v job Job control interface * @v chain X.509 certificate chain + * @v root Root of trust, or NULL to use default * @ret rc Return status code */ -int create_validator ( struct interface *job, struct x509_chain *chain ) { +int create_validator ( struct interface *job, struct x509_chain *chain, + struct x509_root *root ) { struct validator *validator; int rc; @@ -646,6 +651,7 @@ int create_validator ( struct interface *job, struct x509_chain *chain ) { &validator->refcnt ); process_init ( &validator->process, &validator_process_desc, &validator->refcnt ); + validator->root = x509_root_get ( root ); validator->chain = x509_chain_get ( chain ); xferbuf_malloc_init ( &validator->buffer ); diff --git a/src/scripts/efi.lds b/src/scripts/efi.lds index f1049f24b..dd7b3f019 100644 --- a/src/scripts/efi.lds +++ b/src/scripts/efi.lds @@ -8,22 +8,22 @@ SECTIONS { /* The file starts at a virtual address of zero, and sections are - * contiguous. Each section is aligned to at least _max_align, - * which defaults to 32. Load addresses are equal to virtual + * contiguous. Each section is aligned to at least _page_align, + * which defaults to 4096. Load addresses are equal to virtual * addresses. */ - _max_align = 32; + _page_align = 4096; - /* Allow plenty of space for file headers */ - . = 0x1000; + /* Allow one page of space for file headers, common PE/COFF layout */ + . = _page_align; /* * The text section * */ - . = ALIGN ( _max_align ); + . = ALIGN ( _page_align ); .text : { _text = .; *(.text) @@ -36,7 +36,7 @@ SECTIONS { * */ - . = ALIGN ( _max_align ); + . = ALIGN ( _page_align ); .rodata : { _rodata = .; *(.rodata) @@ -49,7 +49,7 @@ SECTIONS { * */ - . = ALIGN ( _max_align ); + . = ALIGN ( _page_align ); .data : { _data = .; *(.data) @@ -65,7 +65,7 @@ SECTIONS { * */ - . = ALIGN ( _max_align ); + . = ALIGN ( _page_align ); .bss : { _bss = .; *(.bss) @@ -106,5 +106,6 @@ SECTIONS { *(.einfo.*) *(.discard) *(.discard.*) + *(.pci_devlist.*) } } diff --git a/src/tests/cms_test.c b/src/tests/cms_test.c index b805a9974..f35fa206d 100644 --- a/src/tests/cms_test.c +++ b/src/tests/cms_test.c @@ -1317,6 +1317,7 @@ static struct x509_chain empty_store = { /** Root certificate list containing the iPXE self-test root CA */ static struct x509_root test_root = { + .refcnt = REF_INIT ( ref_no_free ), .digest = &cms_test_algorithm, .count = 1, .fingerprints = root_crt_fingerprint, @@ -1331,6 +1332,7 @@ static uint8_t dummy_fingerprint[] = /** Certificate store containing a dummy fingerprint */ static struct x509_root dummy_root = { + .refcnt = REF_INIT ( ref_no_free ), .digest = &cms_test_algorithm, .count = 1, .fingerprints = dummy_fingerprint, diff --git a/src/tests/gzip_test.c b/src/tests/gzip_test.c new file mode 100644 index 000000000..fa76edc53 --- /dev/null +++ b/src/tests/gzip_test.c @@ -0,0 +1,160 @@ +/* + * Copyright (C) 2021 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * gzip image tests + * + */ + +/* Forcibly enable assertions */ +#undef NDEBUG + +#include +#include +#include +#include + +/** A gzip test */ +struct gzip_test { + /** Compressed filename */ + const char *compressed_name; + /** Compressed data */ + const void *compressed; + /** Length of compressed data */ + size_t compressed_len; + /** Expected uncompressed name */ + const char *expected_name; + /** Expected uncompressed data */ + const void *expected; + /** Length of expected uncompressed data */ + size_t expected_len; +}; + +/** Define inline data */ +#define DATA(...) { __VA_ARGS__ } + +/** Define a gzip test */ +#define GZIP( name, COMPRESSED, EXPECTED ) \ + static const uint8_t name ## _compressed[] = COMPRESSED; \ + static const uint8_t name ## _expected[] = EXPECTED; \ + static struct gzip_test name = { \ + .compressed_name = #name ".gz", \ + .compressed = name ## _compressed, \ + .compressed_len = sizeof ( name ## _compressed ), \ + .expected_name = #name, \ + .expected = name ## _expected, \ + .expected_len = sizeof ( name ## _expected ), \ + }; + +/** "Hello world" */ +GZIP ( hello_world, + DATA ( 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0xf3, 0x48, 0xcd, 0xc9, 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, + 0x49, 0x01, 0x00, 0x52, 0x9e, 0xd6, 0x8b, 0x0b, 0x00, 0x00, + 0x00 ), + DATA ( 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, + 0x64 ) ); + +/** "Hello filename" */ +GZIP ( hello_filename, + DATA ( 0x1f, 0x8b, 0x08, 0x08, 0xeb, 0x5b, 0x96, 0x60, 0x00, 0x03, + 0x68, 0x77, 0x2e, 0x74, 0x78, 0x74, 0x00, 0xf3, 0x48, 0xcd, + 0xc9, 0xc9, 0x57, 0x48, 0xcb, 0xcc, 0x49, 0xcd, 0x4b, 0xcc, + 0x4d, 0x05, 0x00, 0x69, 0x37, 0x25, 0x3c, 0x0e, 0x00, 0x00, + 0x00 ), + DATA ( 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x66, 0x69, 0x6c, 0x65, + 0x6e, 0x61, 0x6d, 0x65 ) ); + +/** "Hello assorted headers" */ +GZIP ( hello_headers, + DATA ( 0x1f, 0x8b, 0x08, 0x1c, 0x11, 0x5c, 0x96, 0x60, 0x00, 0x03, + 0x05, 0x00, 0x41, 0x70, 0x01, 0x00, 0x0d, 0x68, 0x77, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0x2f, 0x2f, 0x77, 0x68, 0x79, 0x3f, + 0x00, 0xf3, 0x48, 0xcd, 0xc9, 0xc9, 0x57, 0x48, 0x2c, 0x2e, + 0xce, 0x2f, 0x2a, 0x49, 0x4d, 0x51, 0xc8, 0x48, 0x4d, 0x4c, + 0x49, 0x2d, 0x2a, 0x06, 0x00, 0x59, 0xa4, 0x19, 0x61, 0x16, + 0x00, 0x00, 0x00 ), + DATA ( 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x61, 0x73, 0x73, 0x6f, + 0x72, 0x74, 0x65, 0x64, 0x20, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73 ) ); + +/** + * Report gzip test result + * + * @v test gzip test + * @v file Test code file + * @v line Test code line + */ +static void gzip_okx ( struct gzip_test *test, const char *file, + unsigned int line ) { + struct image *image; + struct image *extracted; + + /* Construct compressed image */ + image = image_memory ( test->compressed_name, + virt_to_user ( test->compressed ), + test->compressed_len ); + okx ( image != NULL, file, line ); + okx ( image->len == test->compressed_len, file, line ); + + /* Check type detection */ + okx ( image->type == &gzip_image_type, file, line ); + + /* Extract archive image */ + 
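The new gzip (and, later, zlib) self-tests are table-driven: a macro pastes the compressed bytes, the expected output and the expected extracted name into one static test record, and a single checker walks the record. A stripped-down version of the same macro technique, independent of iPXE's image code and using a trivial identity "decoder" so the example runs on its own:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* A decode test case: input bytes plus expected output */
struct decode_test {
	const uint8_t *in;
	size_t in_len;
	const uint8_t *out;
	size_t out_len;
};

#define DATA(...) { __VA_ARGS__ }

/* Define the data arrays and the test record together, so the
 * recorded lengths can never drift out of sync with the data.
 */
#define DECODE_TEST( name, IN, OUT )					\
	static const uint8_t name ## _in[] = IN;			\
	static const uint8_t name ## _out[] = OUT;			\
	static const struct decode_test name = {			\
		.in = name ## _in,					\
		.in_len = sizeof ( name ## _in ),			\
		.out = name ## _out,					\
		.out_len = sizeof ( name ## _out ),			\
	};

DECODE_TEST ( hello, DATA ( 'h', 'i' ), DATA ( 'h', 'i' ) );

int main ( void ) {
	/* With an identity decoder, input and output must match */
	assert ( hello.in_len == hello.out_len );
	assert ( memcmp ( hello.in, hello.out, hello.out_len ) == 0 );
	return 0;
}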
okx ( image_extract ( image, NULL, &extracted ) == 0, file, line ); + + /* Verify extracted image content */ + okx ( extracted->len == test->expected_len, file, line ); + okx ( memcmp_user ( extracted->data, 0, + virt_to_user ( test->expected ), 0, + test->expected_len ) == 0, file, line ); + + /* Verify extracted image name */ + okx ( strcmp ( extracted->name, test->expected_name ) == 0, + file, line ); + + /* Unregister images */ + unregister_image ( extracted ); + unregister_image ( image ); +} +#define gzip_ok( test ) gzip_okx ( test, __FILE__, __LINE__ ) + +/** + * Perform gzip self-test + * + */ +static void gzip_test_exec ( void ) { + + gzip_ok ( &hello_world ); + gzip_ok ( &hello_filename ); + gzip_ok ( &hello_headers ); +} + +/** gzip self-test */ +struct self_test gzip_test __self_test = { + .name = "gzip", + .exec = gzip_test_exec, +}; diff --git a/src/tests/ocsp_test.c b/src/tests/ocsp_test.c index a3349346a..3d2f556ed 100644 --- a/src/tests/ocsp_test.c +++ b/src/tests/ocsp_test.c @@ -42,6 +42,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include #include #include +#include #include #include @@ -110,7 +111,7 @@ static void ocsp_prepare_test ( struct ocsp_test *test ) { x509_invalidate ( cert ); /* Force-validate issuer certificate */ - issuer->flags |= X509_FL_VALIDATED; + issuer->root = &root_certificates; issuer->path_remaining = ( issuer->extensions.basic.path_len + 1 ); } diff --git a/src/tests/rsa_test.c b/src/tests/rsa_test.c index 91066faab..c5b587ca7 100644 --- a/src/tests/rsa_test.c +++ b/src/tests/rsa_test.c @@ -124,8 +124,8 @@ struct rsa_signature_test { const void *plaintext; /** Plaintext length */ size_t plaintext_len; - /** Digest algorithm */ - struct digest_algorithm *digest; + /** Signature algorithm */ + struct asn1_algorithm *algorithm; /** Signature */ const void *signature; /** Signature length */ @@ -139,12 +139,12 @@ struct rsa_signature_test { * @v PRIVATE Private key * @v PUBLIC Public key * @v PLAINTEXT Plaintext - * @v DIGEST Digest algorithm + * @v ALGORITHM Signature algorithm * @v SIGNATURE Signature * @ret test Signature test */ -#define RSA_SIGNATURE_TEST( name, PRIVATE, PUBLIC, PLAINTEXT, DIGEST, \ - SIGNATURE ) \ +#define RSA_SIGNATURE_TEST( name, PRIVATE, PUBLIC, PLAINTEXT, \ + ALGORITHM, SIGNATURE ) \ static const uint8_t name ## _private[] = PRIVATE; \ static const uint8_t name ## _public[] = PUBLIC; \ static const uint8_t name ## _plaintext[] = PLAINTEXT; \ @@ -156,7 +156,7 @@ struct rsa_signature_test { .public_len = sizeof ( name ## _public ), \ .plaintext = name ## _plaintext, \ .plaintext_len = sizeof ( name ## _plaintext ), \ - .digest = DIGEST, \ + .algorithm = ALGORITHM, \ .signature = name ## _signature, \ .signature_len = sizeof ( name ## _signature ), \ } @@ -188,18 +188,19 @@ struct rsa_signature_test { * @v test RSA signature test */ #define rsa_signature_ok( test ) do { \ + struct digest_algorithm *digest = (test)->algorithm->digest; \ uint8_t bad_signature[ (test)->signature_len ]; \ pubkey_sign_ok ( &rsa_algorithm, (test)->private, \ - (test)->private_len, (test)->digest, \ + (test)->private_len, digest, \ (test)->plaintext, (test)->plaintext_len, \ (test)->signature, (test)->signature_len ); \ pubkey_verify_ok ( &rsa_algorithm, (test)->public, \ - (test)->public_len, (test)->digest, \ + (test)->public_len, digest, \ (test)->plaintext, (test)->plaintext_len, \ (test)->signature, (test)->signature_len ); \ memset ( bad_signature, 0, sizeof ( bad_signature ) ); \ pubkey_verify_fail_ok ( &rsa_algorithm, (test)->public, \ - 
(test)->public_len, (test)->digest, \ + (test)->public_len, digest, \ (test)->plaintext, \ (test)->plaintext_len, bad_signature, \ sizeof ( bad_signature ) ); \ @@ -323,7 +324,7 @@ RSA_SIGNATURE_TEST ( md5_test, 0xf2, 0x8d, 0xfc, 0xfc, 0x37, 0xf7, 0xc7, 0x6d, 0x6c, 0xd8, 0x24, 0x0c, 0x6a, 0xec, 0x82, 0x5c, 0x72, 0xf1, 0xfc, 0x05, 0xed, 0x8e, 0xe8, 0xd9, 0x8b, 0x8b, 0x67, 0x02, 0x95 ), - &md5_algorithm, + &md5_with_rsa_encryption_algorithm, SIGNATURE ( 0xdb, 0x56, 0x3d, 0xea, 0xae, 0x81, 0x4b, 0x3b, 0x2e, 0x8e, 0xb8, 0xee, 0x13, 0x61, 0xc6, 0xe7, 0xd7, 0x50, 0xcd, 0x0d, 0x34, 0x3a, 0xfe, 0x9a, 0x8d, 0xf8, 0xfb, 0xd6, 0x7e, 0xbd, @@ -396,7 +397,7 @@ RSA_SIGNATURE_TEST ( sha1_test, 0x30, 0x91, 0x1c, 0xaa, 0x6c, 0x24, 0x42, 0x1b, 0x1a, 0xba, 0x30, 0x40, 0x49, 0x83, 0xd9, 0xd7, 0x66, 0x7e, 0x5c, 0x1a, 0x4b, 0x7f, 0xa6, 0x8e, 0x8a, 0xd6, 0x0c, 0x65, 0x75 ), - &sha1_algorithm, + &sha1_with_rsa_encryption_algorithm, SIGNATURE ( 0xa5, 0x5a, 0x8a, 0x67, 0x81, 0x76, 0x7e, 0xad, 0x99, 0x22, 0xf1, 0x47, 0x64, 0xd2, 0xfb, 0x81, 0x45, 0xeb, 0x85, 0x56, 0xf8, 0x7d, 0xb8, 0xec, 0x41, 0x17, 0x84, 0xf7, 0x2b, 0xbb, @@ -469,7 +470,7 @@ RSA_SIGNATURE_TEST ( sha256_test, 0x91, 0x71, 0xd6, 0x2d, 0xa1, 0xae, 0x81, 0x0c, 0xed, 0x54, 0x48, 0x79, 0x8a, 0x78, 0x05, 0x74, 0x4d, 0x4f, 0xf0, 0xe0, 0x3c, 0x41, 0x5c, 0x04, 0x0b, 0x68, 0x57, 0xc5, 0xd6 ), - &sha256_algorithm, + &sha256_with_rsa_encryption_algorithm, SIGNATURE ( 0x02, 0x2e, 0xc5, 0x2a, 0x2b, 0x7f, 0xb4, 0x80, 0xca, 0x9d, 0x96, 0x5b, 0xaf, 0x1f, 0x72, 0x5b, 0x6e, 0xf1, 0x69, 0x7f, 0x4d, 0x41, 0xd5, 0x9f, 0x00, 0xdc, 0x47, 0xf4, 0x68, 0x8f, diff --git a/src/tests/string_test.c b/src/tests/string_test.c index a66501da3..3afb8deb2 100644 --- a/src/tests/string_test.c +++ b/src/tests/string_test.c @@ -105,10 +105,19 @@ static void string_test_exec ( void ) { ok ( strcasecmp ( "Uncle", "Uncle Jack" ) != 0 ); ok ( strcasecmp ( "not", "equal" ) != 0 ); + /* Test strncasecmp() */ + ok ( strncasecmp ( "", "", 0 ) == 0 ); + ok ( strncasecmp ( "", "", 73 ) == 0 ); + ok ( strncasecmp ( "Uncle Jack", "Uncle jack", 47 ) == 0 ); + ok ( strncasecmp ( "Uncle Jack", "Uncle jake", 47 ) != 0 ); + ok ( strncasecmp ( "Uncle Jack", "Uncle jake", 9 ) != 0 ); + ok ( strncasecmp ( "Uncle Jack", "Uncle jake", 8 ) == 0 ); + /* Test memcmp() */ ok ( memcmp ( "", "", 0 ) == 0 ); ok ( memcmp ( "Foo", "Foo", 3 ) == 0 ); ok ( memcmp ( "Foo", "Bar", 3 ) != 0 ); + ok ( memcmp ( "abc", "def", 3 ) < 0 ); /* Test strstr() */ { diff --git a/src/tests/tests.c b/src/tests/tests.c index 2e812d6ff..1cc4c81e8 100644 --- a/src/tests/tests.c +++ b/src/tests/tests.c @@ -73,3 +73,5 @@ REQUIRE_OBJECT ( bitops_test ); REQUIRE_OBJECT ( der_test ); REQUIRE_OBJECT ( pem_test ); REQUIRE_OBJECT ( ntlm_test ); +REQUIRE_OBJECT ( zlib_test ); +REQUIRE_OBJECT ( gzip_test ); diff --git a/src/tests/x509_test.c b/src/tests/x509_test.c index 658d5247c..b6cba575c 100644 --- a/src/tests/x509_test.c +++ b/src/tests/x509_test.c @@ -674,6 +674,7 @@ static struct x509_chain empty_store = { /** Root certificate list containing the iPXE self-test root CA */ static struct x509_root test_root = { + .refcnt = REF_INIT ( ref_no_free ), .digest = &x509_test_algorithm, .count = 1, .fingerprints = root_crt_fingerprint, @@ -681,6 +682,7 @@ static struct x509_root test_root = { /** Root certificate list containing the iPXE self-test intermediate CA */ static struct x509_root intermediate_root = { + .refcnt = REF_INIT ( ref_no_free ), .digest = &x509_test_algorithm, .count = 1, .fingerprints = intermediate_crt_fingerprint, @@ -695,6 
+697,7 @@ static uint8_t dummy_fingerprint[] = /** Certificate store containing a dummy fingerprint */ static struct x509_root dummy_root = { + .refcnt = REF_INIT ( ref_no_free ), .digest = &x509_test_algorithm, .count = 1, .fingerprints = dummy_fingerprint, @@ -943,6 +946,10 @@ static void x509_validate_chain_okx ( struct x509_test_chain *chn, time_t time, x509_invalidate_chain ( chn->chain ); okx ( x509_validate_chain ( chn->chain, time, store, root ) == 0, file, line ); + okx ( x509_is_valid ( chn->certs[0]->cert, root ), + file, line ); + okx ( ! x509_is_valid ( chn->certs[0]->cert, &dummy_root ), + file, line ); } #define x509_validate_chain_ok( chn, time, store, root ) \ x509_validate_chain_okx ( chn, time, store, root, __FILE__, __LINE__ ) @@ -1030,6 +1037,7 @@ static void x509_test_exec ( void ) { /* Check certificate names */ x509_check_name_ok ( &server_crt, "boot.test.ipxe.org" ); x509_check_name_ok ( &server_crt, "demo.test.ipxe.org" ); + x509_check_name_ok ( &server_crt, "demo.test.iPXE.org" ); x509_check_name_fail_ok ( &server_crt, "incorrect.test.ipxe.org" ); x509_check_name_ok ( &server_crt, "anything.alt.test.ipxe.org" ); x509_check_name_ok ( &server_crt, "wildcard.alt.test.ipxe.org" ); diff --git a/src/tests/zlib_test.c b/src/tests/zlib_test.c new file mode 100644 index 000000000..df52d09ac --- /dev/null +++ b/src/tests/zlib_test.c @@ -0,0 +1,133 @@ +/* + * Copyright (C) 2021 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +/** @file + * + * zlib image tests + * + */ + +/* Forcibly enable assertions */ +#undef NDEBUG + +#include +#include +#include +#include + +/** A zlib test */ +struct zlib_test { + /** Compressed filename */ + const char *compressed_name; + /** Compressed data */ + const void *compressed; + /** Length of compressed data */ + size_t compressed_len; + /** Expected uncompressed name */ + const char *expected_name; + /** Expected uncompressed data */ + const void *expected; + /** Length of expected uncompressed data */ + size_t expected_len; +}; + +/** Define inline data */ +#define DATA(...) 
{ __VA_ARGS__ } + +/** Define a zlib test */ +#define ZLIB( name, COMPRESSED, EXPECTED ) \ + static const uint8_t name ## _compressed[] = COMPRESSED; \ + static const uint8_t name ## _expected[] = EXPECTED; \ + static struct zlib_test name = { \ + .compressed_name = #name ".z", \ + .compressed = name ## _compressed, \ + .compressed_len = sizeof ( name ## _compressed ), \ + .expected_name = #name, \ + .expected = name ## _expected, \ + .expected_len = sizeof ( name ## _expected ), \ + }; + +/** "Hello world" */ +ZLIB ( hello_world, + DATA ( 0x78, 0x9c, 0xf3, 0x48, 0xcd, 0xc9, 0xc9, 0x57, 0x28, 0xcf, + 0x2f, 0xca, 0x49, 0x01, 0x00, 0x18, 0xab, 0x04, 0x3d ), + DATA ( 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, + 0x64 ) ); + +/** + * Report zlib test result + * + * @v test zlib test + * @v file Test code file + * @v line Test code line + */ +static void zlib_okx ( struct zlib_test *test, const char *file, + unsigned int line ) { + struct image *image; + struct image *extracted; + + /* Construct compressed image */ + image = image_memory ( test->compressed_name, + virt_to_user ( test->compressed ), + test->compressed_len ); + okx ( image != NULL, file, line ); + okx ( image->len == test->compressed_len, file, line ); + + /* Check type detection */ + okx ( image->type == &zlib_image_type, file, line ); + + /* Extract archive image */ + okx ( image_extract ( image, NULL, &extracted ) == 0, file, line ); + + /* Verify extracted image content */ + okx ( extracted->len == test->expected_len, file, line ); + okx ( memcmp_user ( extracted->data, 0, + virt_to_user ( test->expected ), 0, + test->expected_len ) == 0, file, line ); + + /* Verify extracted image name */ + okx ( strcmp ( extracted->name, test->expected_name ) == 0, + file, line ); + + /* Unregister images */ + unregister_image ( extracted ); + unregister_image ( image ); +} +#define zlib_ok( test ) zlib_okx ( test, __FILE__, __LINE__ ) + +/** + * Perform zlib self-test + * + */ +static void zlib_test_exec ( void ) { + + zlib_ok ( &hello_world ); +} + +/** zlib self-test */ +struct self_test zlib_test __self_test = { + .name = "zlib", + .exec = zlib_test_exec, +}; diff --git a/src/usr/autoboot.c b/src/usr/autoboot.c index 106e0f879..62e90ecd0 100644 --- a/src/usr/autoboot.c +++ b/src/usr/autoboot.c @@ -210,18 +210,19 @@ int uriboot ( struct uri *filename, struct uri **root_paths, } /** - * Close all open net devices + * Close all but one network device * * Called before a fresh boot attempt in order to free up memory. We * don't just close the device immediately after the boot fails, * because there may still be TCP connections in the process of * closing. 
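For reference, the "Hello world" vector embedded in the zlib self-test above (the 0x78, 0x9c zlib header, the DEFLATE payload, and the trailing Adler-32 checksum 0x18ab043d) can be regenerated with the reference zlib library. The sketch below is purely illustrative and is not part of the iPXE tree; it assumes a hosted toolchain linked against -lz, and the exact bytes it prints may differ from the hand-picked vector above depending on the compression level used.

/* Regenerate a zlib test vector for "Hello world" (illustrative only;
 * not part of the iPXE tree, assumes a hosted build and -lz). */
#include <stdio.h>
#include <zlib.h>

int main ( void ) {
	const unsigned char src[] = "Hello world";
	unsigned char dst[64];
	uLongf dst_len = sizeof ( dst );
	unsigned int i;

	/* compress() wraps a DEFLATE stream in the two-byte zlib header
	 * (0x78 0x9c at the default level) and appends the Adler-32 of
	 * the input (0x18ab043d for "Hello world"). */
	if ( compress ( dst, &dst_len, src, ( sizeof ( src ) - 1 ) ) != Z_OK )
		return 1;
	for ( i = 0 ; i < dst_len ; i++ )
		printf ( "0x%02x%s", dst[i],
			 ( ( i + 1 < dst_len ) ? ", " : "\n" ) );
	return 0;
}

The self-test itself then relies on image_memory() and image_extract() to turn such a stream back into the original eleven bytes.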
*/ -static void close_all_netdevs ( void ) { - struct net_device *netdev; +static void close_other_netdevs ( struct net_device *netdev ) { + struct net_device *other; - for_each_netdev ( netdev ) { - ifclose ( netdev ); + for_each_netdev ( other ) { + if ( other != netdev ) + ifclose ( other ); } } @@ -388,7 +389,7 @@ int netboot ( struct net_device *netdev ) { int rc; /* Close all other network devices */ - close_all_netdevs(); + close_other_netdevs ( netdev ); /* Open device and display device status */ if ( ( rc = ifopen ( netdev ) ) != 0 ) @@ -396,7 +397,7 @@ int netboot ( struct net_device *netdev ) { ifstat ( netdev ); /* Configure device */ - if ( ( rc = ifconf ( netdev, NULL ) ) != 0 ) + if ( ( rc = ifconf ( netdev, NULL, 0 ) ) != 0 ) goto err_dhcp; route(); diff --git a/src/usr/certmgmt.c b/src/usr/certmgmt.c index 2f233fe4f..e6bf51fd8 100644 --- a/src/usr/certmgmt.c +++ b/src/usr/certmgmt.c @@ -57,7 +57,7 @@ void certstat ( struct x509_certificate *cert ) { printf ( " [PERMANENT]" ); if ( cert->flags & X509_FL_EXPLICIT ) printf ( " [EXPLICIT]" ); - if ( x509_is_valid ( cert ) ) + if ( x509_is_valid ( cert, NULL ) ) printf ( " [VALIDATED]" ); printf ( "\n" ); } diff --git a/src/usr/ifmgmt.c b/src/usr/ifmgmt.c index f367149f7..d87ffff27 100644 --- a/src/usr/ifmgmt.c +++ b/src/usr/ifmgmt.c @@ -108,10 +108,11 @@ static void ifstat_errors ( struct net_device_stats *stats, * @v netdev Network device */ void ifstat ( struct net_device *netdev ) { - printf ( "%s: %s using %s on %s (%s)\n" + printf ( "%s: %s using %s on %s (%s) [%s]\n" " [Link:%s%s, TX:%d TXE:%d RX:%d RXE:%d]\n", netdev->name, netdev_addr ( netdev ), netdev->dev->driver_name, netdev->dev->name, + netdev->ll_protocol->name, ( netdev_is_open ( netdev ) ? "open" : "closed" ), ( netdev_link_ok ( netdev ) ? "up" : "down" ), ( netdev_link_blocked ( netdev ) ? " (blocked)" : "" ), @@ -212,17 +213,20 @@ static int iflinkwait_progress ( struct ifpoller *ifpoller ) { * * @v netdev Network device * @v timeout Timeout period, in ticks + * @v verbose Always display progress message + * @ret rc Return status code */ -int iflinkwait ( struct net_device *netdev, unsigned long timeout ) { +int iflinkwait ( struct net_device *netdev, unsigned long timeout, + int verbose ) { int rc; /* Ensure device is open */ if ( ( rc = ifopen ( netdev ) ) != 0 ) return rc; - /* Return immediately if link is already up */ + /* Return immediately if link is already up, unless being verbose */ netdev_poll ( netdev ); - if ( netdev_link_ok ( netdev ) ) + if ( netdev_link_ok ( netdev ) && ( ! verbose ) ) return 0; /* Wait for link-up */ @@ -264,14 +268,16 @@ static int ifconf_progress ( struct ifpoller *ifpoller ) { * * @v netdev Network device * @v configurator Network device configurator, or NULL to use all + * @v timeout Timeout period, in ticks * @ret rc Return status code */ int ifconf ( struct net_device *netdev, - struct net_device_configurator *configurator ) { + struct net_device_configurator *configurator, + unsigned long timeout ) { int rc; /* Ensure device is open and link is up */ - if ( ( rc = iflinkwait ( netdev, LINK_WAIT_TIMEOUT ) ) != 0 ) + if ( ( rc = iflinkwait ( netdev, LINK_WAIT_TIMEOUT, 0 ) ) != 0 ) return rc; /* Start configuration */ @@ -296,5 +302,5 @@ int ifconf ( struct net_device *netdev, ( configurator ? configurator->name : "" ), ( configurator ? 
"] " : "" ), netdev->name, netdev->ll_protocol->ntoa ( netdev->ll_addr ) ); - return ifpoller_wait ( netdev, configurator, 0, ifconf_progress ); + return ifpoller_wait ( netdev, configurator, timeout, ifconf_progress ); } diff --git a/src/usr/imgarchive.c b/src/usr/imgarchive.c new file mode 100644 index 000000000..6849dd510 --- /dev/null +++ b/src/usr/imgarchive.c @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2021 Michael Brown . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include +#include +#include + +/** @file + * + * Archive image management + * + */ + +/** + * Extract archive image + * + * @v image Image + * @v name Extracted image name (or NULL to use default) + * @ret rc Return status code + */ +int imgextract ( struct image *image, const char *name ) { + struct image *extracted; + int rc; + + /* Extract archive image */ + if ( ( rc = image_extract ( image, name, &extracted ) ) != 0 ) { + printf ( "Could not extract image: %s\n", strerror ( rc ) ); + return rc; + } + + return 0; +} diff --git a/src/usr/imgmgmt.c b/src/usr/imgmgmt.c index a01d6e291..f8d149153 100644 --- a/src/usr/imgmgmt.c +++ b/src/usr/imgmgmt.c @@ -169,3 +169,24 @@ void imgstat ( struct image *image ) { printf ( " \"%s\"", image->cmdline ); printf ( "\n" ); } + +/** + * Create image from block of memory + * + * @v name Name + * @v data Image data + * @v len Length + * @ret rc Return status code + */ +int imgmem ( const char *name, userptr_t data, size_t len ) { + struct image *image; + + /* Create image */ + image = image_memory ( name, data, len ); + if ( ! 
image ) { + printf ( "Could not create image\n" ); + return -ENOMEM; + } + + return 0; +} diff --git a/src/usr/imgtrust.c b/src/usr/imgtrust.c index 595ea6b25..e7c2067a0 100644 --- a/src/usr/imgtrust.c +++ b/src/usr/imgtrust.c @@ -77,7 +77,8 @@ int imgverify ( struct image *image, struct image *signature, /* Complete all certificate chains */ list_for_each_entry ( info, &sig->info, list ) { - if ( ( rc = create_validator ( &monojob, info->chain ) ) != 0 ) + if ( ( rc = create_validator ( &monojob, info->chain, + NULL ) ) != 0 ) goto err_create_validator; if ( ( rc = monojob_wait ( NULL, 0 ) ) != 0 ) goto err_validator_wait; diff --git a/src/usr/lotest.c b/src/usr/lotest.c index 6b75b5048..5b88ef27e 100644 --- a/src/usr/lotest.c +++ b/src/usr/lotest.c @@ -208,9 +208,9 @@ int loopback_test ( struct net_device *sender, struct net_device *receiver, return rc; /* Wait for link-up */ - if ( ( rc = iflinkwait ( sender, 0 ) ) != 0 ) + if ( ( rc = iflinkwait ( sender, 0, 0 ) ) != 0 ) return rc; - if ( ( rc = iflinkwait ( receiver, 0 ) ) != 0 ) + if ( ( rc = iflinkwait ( receiver, 0, 0 ) ) != 0 ) return rc; /* Allocate data buffer */ diff --git a/src/util/.gitignore b/src/util/.gitignore index 33bedefd0..b4cb13601 100644 --- a/src/util/.gitignore +++ b/src/util/.gitignore @@ -6,5 +6,4 @@ elf2efi32 elf2efi64 efirom efifatbin -iccfix einfo diff --git a/src/util/eficompress.c b/src/util/eficompress.c new file mode 100644 index 000000000..4fd74ccee --- /dev/null +++ b/src/util/eficompress.c @@ -0,0 +1,1588 @@ +/** @file +Compression routine. The compression algorithm is a mixture of LZ77 and Huffman +coding. LZ77 transforms the source data into a sequence of Original Characters +and Pointers to repeated strings. This sequence is further divided into Blocks +and Huffman codings are applied to each Block. + +Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.
+SPDX-License-Identifier: BSD-2-Clause-Patent + +**/ + +// +// Macro Definitions +// + +#undef UINT8_MAX +typedef INT16 NODE; +#define UINT8_MAX 0xff +#define UINT8_BIT 8 +#define THRESHOLD 3 +#define INIT_CRC 0 +#define WNDBIT 13 +#define WNDSIZ (1 << WNDBIT) +#define MAXMATCH 256 +#define PERC_FLAG 0x8000U +#define CODE_BIT 16 +#define NIL 0 +#define MAX_HASH_VAL (3 * WNDSIZ + (WNDSIZ / 512 + 1) * UINT8_MAX) +#define HASH(p, c) ((p) + ((c) << (WNDBIT - 9)) + WNDSIZ * 2) +#define CRCPOLY 0xA001 +#define UPDATE_CRC(c) mCrc = mCrcTable[(mCrc ^ (c)) & 0xFF] ^ (mCrc >> UINT8_BIT) + +// +// C: the Char&Len Set; P: the Position Set; T: the exTra Set +// + +#define NC (UINT8_MAX + MAXMATCH + 2 - THRESHOLD) +#define CBIT 9 +#define NP (WNDBIT + 1) +#define PBIT 4 +#define NT (CODE_BIT + 3) +#define TBIT 5 +#if NT > NP + #define NPT NT +#else + #define NPT NP +#endif + +// +// Function Prototypes +// + +STATIC +VOID +PutDword( + IN UINT32 Data + ); + +STATIC +EFI_STATUS +AllocateMemory ( + ); + +STATIC +VOID +FreeMemory ( + ); + +STATIC +VOID +InitSlide ( + ); + +STATIC +NODE +Child ( + IN NODE q, + IN UINT8 c + ); + +STATIC +VOID +MakeChild ( + IN NODE q, + IN UINT8 c, + IN NODE r + ); + +STATIC +VOID +Split ( + IN NODE Old + ); + +STATIC +VOID +InsertNode ( + ); + +STATIC +VOID +DeleteNode ( + ); + +STATIC +VOID +GetNextMatch ( + ); + +STATIC +EFI_STATUS +Encode ( + ); + +STATIC +VOID +CountTFreq ( + ); + +STATIC +VOID +WritePTLen ( + IN INT32 n, + IN INT32 nbit, + IN INT32 Special + ); + +STATIC +VOID +WriteCLen ( + ); + +STATIC +VOID +EncodeC ( + IN INT32 c + ); + +STATIC +VOID +EncodeP ( + IN UINT32 p + ); + +STATIC +VOID +SendBlock ( + ); + +STATIC +VOID +Output ( + IN UINT32 c, + IN UINT32 p + ); + +STATIC +VOID +HufEncodeStart ( + ); + +STATIC +VOID +HufEncodeEnd ( + ); + +STATIC +VOID +MakeCrcTable ( + ); + +STATIC +VOID +PutBits ( + IN INT32 n, + IN UINT32 x + ); + +STATIC +INT32 +FreadCrc ( + OUT UINT8 *p, + IN INT32 n + ); + +STATIC +VOID +InitPutBits ( + ); + +STATIC +VOID +CountLen ( + IN INT32 i + ); + +STATIC +VOID +MakeLen ( + IN INT32 Root + ); + +STATIC +VOID +DownHeap ( + IN INT32 i + ); + +STATIC +VOID +MakeCode ( + IN INT32 n, + IN UINT8 Len[], + OUT UINT16 Code[] + ); + +STATIC +INT32 +MakeTree ( + IN INT32 NParm, + IN UINT16 FreqParm[], + OUT UINT8 LenParm[], + OUT UINT16 CodeParm[] + ); + + +// +// Global Variables +// + +STATIC UINT8 *mSrc, *mDst, *mSrcUpperLimit, *mDstUpperLimit; + +STATIC UINT8 *mLevel, *mText, *mChildCount, *mBuf, mCLen[NC], mPTLen[NPT], *mLen; +STATIC INT16 mHeap[NC + 1]; +STATIC INT32 mRemainder, mMatchLen, mBitCount, mHeapSize, mN; +STATIC UINT32 mBufSiz = 0, mOutputPos, mOutputMask, mSubBitBuf, mCrc; +STATIC UINT32 mCompSize, mOrigSize; + +STATIC UINT16 *mFreq, *mSortPtr, mLenCnt[17], mLeft[2 * NC - 1], mRight[2 * NC - 1], + mCrcTable[UINT8_MAX + 1], mCFreq[2 * NC - 1],mCCode[NC], + mPFreq[2 * NP - 1], mPTCode[NPT], mTFreq[2 * NT - 1]; + +STATIC NODE mPos, mMatchPos, mAvail, *mPosition, *mParent, *mPrev, *mNext = NULL; + + +// +// functions +// + +EFI_STATUS +EfiCompress ( + IN UINT8 *SrcBuffer, + IN UINT32 SrcSize, + IN UINT8 *DstBuffer, + IN OUT UINT32 *DstSize + ) +/*++ + +Routine Description: + + The main compression routine. + +Arguments: + + SrcBuffer - The buffer storing the source data + SrcSize - The size of source data + DstBuffer - The buffer to store the compressed data + DstSize - On input, the size of DstBuffer; On output, + the size of the actual compressed data. + +Returns: + + EFI_BUFFER_TOO_SMALL - The DstBuffer is too small. 
In this case, + DstSize contains the size needed. + EFI_SUCCESS - Compression is successful. + +--*/ +{ + EFI_STATUS Status = EFI_SUCCESS; + + // + // Initializations + // + mBufSiz = 0; + mBuf = NULL; + mText = NULL; + mLevel = NULL; + mChildCount = NULL; + mPosition = NULL; + mParent = NULL; + mPrev = NULL; + mNext = NULL; + + + mSrc = SrcBuffer; + mSrcUpperLimit = mSrc + SrcSize; + mDst = DstBuffer; + mDstUpperLimit = mDst + *DstSize; + + PutDword(0L); + PutDword(0L); + + MakeCrcTable (); + + mOrigSize = mCompSize = 0; + mCrc = INIT_CRC; + + // + // Compress it + // + + Status = Encode(); + if (EFI_ERROR (Status)) { + return EFI_OUT_OF_RESOURCES; + } + + // + // Null terminate the compressed data + // + if (mDst < mDstUpperLimit) { + *mDst++ = 0; + } + + // + // Fill in compressed size and original size + // + mDst = DstBuffer; + PutDword(mCompSize+1); + PutDword(mOrigSize); + + // + // Return + // + + if (mCompSize + 1 + 8 > *DstSize) { + *DstSize = mCompSize + 1 + 8; + return EFI_BUFFER_TOO_SMALL; + } else { + *DstSize = mCompSize + 1 + 8; + return EFI_SUCCESS; + } + +} + +STATIC +VOID +PutDword( + IN UINT32 Data + ) +/*++ + +Routine Description: + + Put a dword to output stream + +Arguments: + + Data - the dword to put + +Returns: (VOID) + +--*/ +{ + if (mDst < mDstUpperLimit) { + *mDst++ = (UINT8)(((UINT8)(Data )) & 0xff); + } + + if (mDst < mDstUpperLimit) { + *mDst++ = (UINT8)(((UINT8)(Data >> 0x08)) & 0xff); + } + + if (mDst < mDstUpperLimit) { + *mDst++ = (UINT8)(((UINT8)(Data >> 0x10)) & 0xff); + } + + if (mDst < mDstUpperLimit) { + *mDst++ = (UINT8)(((UINT8)(Data >> 0x18)) & 0xff); + } +} + +STATIC +EFI_STATUS +AllocateMemory () +/*++ + +Routine Description: + + Allocate memory spaces for data structures used in compression process + +Arguments: (VOID) + +Returns: + + EFI_SUCCESS - Memory is allocated successfully + EFI_OUT_OF_RESOURCES - Allocation fails + +--*/ +{ + UINT32 i; + + mText = malloc (WNDSIZ * 2 + MAXMATCH); + if (mText == NULL) { + return EFI_OUT_OF_RESOURCES; + } + for (i = 0 ; i < WNDSIZ * 2 + MAXMATCH; i ++) { + mText[i] = 0; + } + + mLevel = malloc ((WNDSIZ + UINT8_MAX + 1) * sizeof(*mLevel)); + mChildCount = malloc ((WNDSIZ + UINT8_MAX + 1) * sizeof(*mChildCount)); + mPosition = malloc ((WNDSIZ + UINT8_MAX + 1) * sizeof(*mPosition)); + mParent = malloc (WNDSIZ * 2 * sizeof(*mParent)); + mPrev = malloc (WNDSIZ * 2 * sizeof(*mPrev)); + mNext = malloc ((MAX_HASH_VAL + 1) * sizeof(*mNext)); + if (mLevel == NULL || mChildCount == NULL || mPosition == NULL || + mParent == NULL || mPrev == NULL || mNext == NULL) { + return EFI_OUT_OF_RESOURCES; + } + + mBufSiz = 16 * 1024U; + while ((mBuf = malloc(mBufSiz)) == NULL) { + mBufSiz = (mBufSiz / 10U) * 9U; + if (mBufSiz < 4 * 1024U) { + return EFI_OUT_OF_RESOURCES; + } + } + mBuf[0] = 0; + + return EFI_SUCCESS; +} + +VOID +FreeMemory () +/*++ + +Routine Description: + + Called when compression is completed to free memory previously allocated. 
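The entry point above follows the usual EDK2 convention for sizing output buffers: the caller supplies a buffer and its length in *DstSize, and on EFI_BUFFER_TOO_SMALL the routine has already written the required length back into *DstSize, so the call can simply be repeated with a larger buffer. The sketch below shows that retry pattern from the caller's side; compress_with_retry() is a hypothetical helper, not part of this patch, and it assumes the EDK2 typedefs and the EfiCompress() declaration from this file are in scope along with the hosted malloc()/realloc()/free(). The efirom.c change later in this patch takes a simpler route: it allocates a buffer of the uncompressed size and keeps the original data whenever compression does not make it smaller.

/* Hypothetical caller sketch: retry EfiCompress() once the required
 * output size is known (assumes the EDK2 types used in this file). */
static UINT8 * compress_with_retry ( UINT8 *src, UINT32 src_len,
				     UINT32 *out_len ) {
	UINT32 dst_len = src_len;	/* first guess: no expansion */
	UINT8 *dst = malloc ( dst_len );
	UINT8 *bigger;
	EFI_STATUS status;

	if ( ! dst )
		return NULL;
	status = EfiCompress ( src, src_len, dst, &dst_len );
	if ( status == EFI_BUFFER_TOO_SMALL ) {
		/* dst_len now holds the size actually needed */
		bigger = realloc ( dst, dst_len );
		if ( ! bigger ) {
			free ( dst );
			return NULL;
		}
		dst = bigger;
		status = EfiCompress ( src, src_len, dst, &dst_len );
	}
	if ( EFI_ERROR ( status ) ) {
		free ( dst );
		return NULL;
	}
	*out_len = dst_len;
	return dst;
}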
+ +Arguments: (VOID) + +Returns: (VOID) + +--*/ +{ + if (mText) { + free (mText); + } + + if (mLevel) { + free (mLevel); + } + + if (mChildCount) { + free (mChildCount); + } + + if (mPosition) { + free (mPosition); + } + + if (mParent) { + free (mParent); + } + + if (mPrev) { + free (mPrev); + } + + if (mNext) { + free (mNext); + } + + if (mBuf) { + free (mBuf); + } + + return; +} + + +STATIC +VOID +InitSlide () +/*++ + +Routine Description: + + Initialize String Info Log data structures + +Arguments: (VOID) + +Returns: (VOID) + +--*/ +{ + NODE i; + + for (i = WNDSIZ; i <= WNDSIZ + UINT8_MAX; i++) { + mLevel[i] = 1; + mPosition[i] = NIL; /* sentinel */ + } + for (i = WNDSIZ; i < WNDSIZ * 2; i++) { + mParent[i] = NIL; + } + mAvail = 1; + for (i = 1; i < WNDSIZ - 1; i++) { + mNext[i] = (NODE)(i + 1); + } + + mNext[WNDSIZ - 1] = NIL; + for (i = WNDSIZ * 2; i <= MAX_HASH_VAL; i++) { + mNext[i] = NIL; + } +} + + +STATIC +NODE +Child ( + IN NODE q, + IN UINT8 c + ) +/*++ + +Routine Description: + + Find child node given the parent node and the edge character + +Arguments: + + q - the parent node + c - the edge character + +Returns: + + The child node (NIL if not found) + +--*/ +{ + NODE r; + + r = mNext[HASH(q, c)]; + mParent[NIL] = q; /* sentinel */ + while (mParent[r] != q) { + r = mNext[r]; + } + + return r; +} + +STATIC +VOID +MakeChild ( + IN NODE q, + IN UINT8 c, + IN NODE r + ) +/*++ + +Routine Description: + + Create a new child for a given parent node. + +Arguments: + + q - the parent node + c - the edge character + r - the child node + +Returns: (VOID) + +--*/ +{ + NODE h, t; + + h = (NODE)HASH(q, c); + t = mNext[h]; + mNext[h] = r; + mNext[r] = t; + mPrev[t] = r; + mPrev[r] = h; + mParent[r] = q; + mChildCount[q]++; +} + +STATIC +VOID +Split ( + NODE Old + ) +/*++ + +Routine Description: + + Split a node. + +Arguments: + + Old - the node to split + +Returns: (VOID) + +--*/ +{ + NODE New, t; + + New = mAvail; + mAvail = mNext[New]; + mChildCount[New] = 0; + t = mPrev[Old]; + mPrev[New] = t; + mNext[t] = New; + t = mNext[Old]; + mNext[New] = t; + mPrev[t] = New; + mParent[New] = mParent[Old]; + mLevel[New] = (UINT8)mMatchLen; + mPosition[New] = mPos; + MakeChild(New, mText[mMatchPos + mMatchLen], Old); + MakeChild(New, mText[mPos + mMatchLen], mPos); +} + +STATIC +VOID +InsertNode () +/*++ + +Routine Description: + + Insert string info for current position into the String Info Log + +Arguments: (VOID) + +Returns: (VOID) + +--*/ +{ + NODE q, r, j, t; + UINT8 c, *t1, *t2; + + if (mMatchLen >= 4) { + + // + // We have just got a long match, the target tree + // can be located by MatchPos + 1. Traverse the tree + // from bottom up to get to a proper starting point. + // The usage of PERC_FLAG ensures proper node deletion + // in DeleteNode() later. + // + + mMatchLen--; + r = (INT16)((mMatchPos + 1) | WNDSIZ); + while ((q = mParent[r]) == NIL) { + r = mNext[r]; + } + while (mLevel[q] >= mMatchLen) { + r = q; q = mParent[q]; + } + t = q; + while (mPosition[t] < 0) { + mPosition[t] = mPos; + t = mParent[t]; + } + if (t < WNDSIZ) { + mPosition[t] = (NODE)(mPos | PERC_FLAG); + } + } else { + + // + // Locate the target tree + // + + q = (INT16)(mText[mPos] + WNDSIZ); + c = mText[mPos + 1]; + if ((r = Child(q, c)) == NIL) { + MakeChild(q, c, mPos); + mMatchLen = 1; + return; + } + mMatchLen = 2; + } + + // + // Traverse down the tree to find a match. + // Update Position value along the route. + // Node split or creation is involved. 
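All of the tree machinery above (Child(), MakeChild(), Split() and the InsertNode()/DeleteNode() pair) exists to answer one question quickly: what is the longest earlier copy, within the last WNDSIZ bytes, of the string starting at the current position, capped at MAXMATCH bytes? The brute-force sketch below states that question directly; it is a standalone illustration of the idea only, far too slow to substitute for the sliding-window tree, and none of its names belong to the EDK2 code.

#include <stddef.h>

#define WNDBIT    13
#define WNDSIZ    ( 1 << WNDBIT )	/* sliding window, as above */
#define MAXMATCH  256			/* longest match considered */
#define THRESHOLD 3			/* shorter matches become literals */

/* Return the longest match length for buf[pos..] found in the preceding
 * window, storing its distance in *dist; 0 means "emit a literal".
 * The copy source may run into the region being matched (i + n may
 * reach pos), which is fine for LZ77-style copies. */
size_t longest_match ( const unsigned char *buf, size_t len,
		       size_t pos, size_t *dist ) {
	size_t best_len = 0;
	size_t start = ( pos > WNDSIZ ) ? ( pos - WNDSIZ ) : 0;
	size_t i, n;

	*dist = 0;
	for ( i = start ; i < pos ; i++ ) {
		for ( n = 0 ; ( n < MAXMATCH ) && ( ( pos + n ) < len ) &&
			      ( buf[i + n] == buf[pos + n] ) ; n++ ) {}
		if ( n > best_len ) {
			best_len = n;
			*dist = ( pos - i );
		}
	}
	return ( ( best_len >= THRESHOLD ) ? best_len : 0 );
}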
+ // + + for ( ; ; ) { + if (r >= WNDSIZ) { + j = MAXMATCH; + mMatchPos = r; + } else { + j = mLevel[r]; + mMatchPos = (NODE)(mPosition[r] & ~PERC_FLAG); + } + if (mMatchPos >= mPos) { + mMatchPos -= WNDSIZ; + } + t1 = &mText[mPos + mMatchLen]; + t2 = &mText[mMatchPos + mMatchLen]; + while (mMatchLen < j) { + if (*t1 != *t2) { + Split(r); + return; + } + mMatchLen++; + t1++; + t2++; + } + if (mMatchLen >= MAXMATCH) { + break; + } + mPosition[r] = mPos; + q = r; + if ((r = Child(q, *t1)) == NIL) { + MakeChild(q, *t1, mPos); + return; + } + mMatchLen++; + } + t = mPrev[r]; + mPrev[mPos] = t; + mNext[t] = mPos; + t = mNext[r]; + mNext[mPos] = t; + mPrev[t] = mPos; + mParent[mPos] = q; + mParent[r] = NIL; + + // + // Special usage of 'next' + // + mNext[r] = mPos; + +} + +STATIC +VOID +DeleteNode () +/*++ + +Routine Description: + + Delete outdated string info. (The Usage of PERC_FLAG + ensures a clean deletion) + +Arguments: (VOID) + +Returns: (VOID) + +--*/ +{ + NODE q, r, s, t, u; + + if (mParent[mPos] == NIL) { + return; + } + + r = mPrev[mPos]; + s = mNext[mPos]; + mNext[r] = s; + mPrev[s] = r; + r = mParent[mPos]; + mParent[mPos] = NIL; + if (r >= WNDSIZ || --mChildCount[r] > 1) { + return; + } + t = (NODE)(mPosition[r] & ~PERC_FLAG); + if (t >= mPos) { + t -= WNDSIZ; + } + s = t; + q = mParent[r]; + while ((u = mPosition[q]) & PERC_FLAG) { + u &= ~PERC_FLAG; + if (u >= mPos) { + u -= WNDSIZ; + } + if (u > s) { + s = u; + } + mPosition[q] = (INT16)(s | WNDSIZ); + q = mParent[q]; + } + if (q < WNDSIZ) { + if (u >= mPos) { + u -= WNDSIZ; + } + if (u > s) { + s = u; + } + mPosition[q] = (INT16)(s | WNDSIZ | PERC_FLAG); + } + s = Child(r, mText[t + mLevel[r]]); + t = mPrev[s]; + u = mNext[s]; + mNext[t] = u; + mPrev[u] = t; + t = mPrev[r]; + mNext[t] = s; + mPrev[s] = t; + t = mNext[r]; + mPrev[t] = s; + mNext[s] = t; + mParent[s] = mParent[r]; + mParent[r] = NIL; + mNext[r] = mAvail; + mAvail = r; +} + +STATIC +VOID +GetNextMatch () +/*++ + +Routine Description: + + Advance the current position (read in new data if needed). + Delete outdated string info. Find a match string for current position. + +Arguments: (VOID) + +Returns: (VOID) + +--*/ +{ + INT32 n; + + mRemainder--; + if (++mPos == WNDSIZ * 2) { + memmove(&mText[0], &mText[WNDSIZ], WNDSIZ + MAXMATCH); + n = FreadCrc(&mText[WNDSIZ + MAXMATCH], WNDSIZ); + mRemainder += n; + mPos = WNDSIZ; + } + DeleteNode(); + InsertNode(); +} + +STATIC +EFI_STATUS +Encode () +/*++ + +Routine Description: + + The main controlling routine for compression process. + +Arguments: (VOID) + +Returns: + + EFI_SUCCESS - The compression is successful + EFI_OUT_0F_RESOURCES - Not enough memory for compression process + +--*/ +{ + EFI_STATUS Status; + INT32 LastMatchLen; + NODE LastMatchPos; + + Status = AllocateMemory(); + if (EFI_ERROR(Status)) { + FreeMemory(); + return Status; + } + + InitSlide(); + + HufEncodeStart(); + + mRemainder = FreadCrc(&mText[WNDSIZ], WNDSIZ + MAXMATCH); + + mMatchLen = 0; + mPos = WNDSIZ; + InsertNode(); + if (mMatchLen > mRemainder) { + mMatchLen = mRemainder; + } + while (mRemainder > 0) { + LastMatchLen = mMatchLen; + LastMatchPos = mMatchPos; + GetNextMatch(); + if (mMatchLen > mRemainder) { + mMatchLen = mRemainder; + } + + if (mMatchLen > LastMatchLen || LastMatchLen < THRESHOLD) { + + // + // Not enough benefits are gained by outputting a pointer, + // so just output the original character + // + + Output(mText[mPos - 1], 0); + } else { + + // + // Outputting a pointer is beneficial enough, do it. 
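The Output() call that follows this comment folds literals and match lengths into a single "Char&Len" alphabet, which is why NC is defined above as UINT8_MAX + MAXMATCH + 2 - THRESHOLD (510 symbols): bytes occupy symbols 0 to 255, and a match of length L (THRESHOLD to MAXMATCH) becomes symbol L + 253. A small standalone sketch of the mapping and its inverse; the function names are illustrative only.

#define THRESHOLD 3
#define MAXMATCH  256
#define NC ( 0xff + MAXMATCH + 2 - THRESHOLD )	/* 510 symbols in total */

/* Literal byte b is sent as symbol b (0..255); match length len is
 * sent as symbol len + 253 (256..509), matching the first argument of
 * the Output() call below. */
static unsigned int length_to_symbol ( unsigned int len ) {
	return ( len + ( 0xff + 1 - THRESHOLD ) );
}

/* Inverse: symbols below 256 are literals, anything else is a length. */
static unsigned int symbol_to_length ( unsigned int sym ) {
	return ( sym - ( 0xff + 1 - THRESHOLD ) );	/* 256 -> 3, 509 -> 256 */
}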
+ // + + Output(LastMatchLen + (UINT8_MAX + 1 - THRESHOLD), + (mPos - LastMatchPos - 2) & (WNDSIZ - 1)); + while (--LastMatchLen > 0) { + GetNextMatch(); + } + if (mMatchLen > mRemainder) { + mMatchLen = mRemainder; + } + } + } + + HufEncodeEnd(); + FreeMemory(); + return EFI_SUCCESS; +} + +STATIC +VOID +CountTFreq () +/*++ + +Routine Description: + + Count the frequencies for the Extra Set + +Arguments: (VOID) + +Returns: (VOID) + +--*/ +{ + INT32 i, k, n, Count; + + for (i = 0; i < NT; i++) { + mTFreq[i] = 0; + } + n = NC; + while (n > 0 && mCLen[n - 1] == 0) { + n--; + } + i = 0; + while (i < n) { + k = mCLen[i++]; + if (k == 0) { + Count = 1; + while (i < n && mCLen[i] == 0) { + i++; + Count++; + } + if (Count <= 2) { + mTFreq[0] = (UINT16)(mTFreq[0] + Count); + } else if (Count <= 18) { + mTFreq[1]++; + } else if (Count == 19) { + mTFreq[0]++; + mTFreq[1]++; + } else { + mTFreq[2]++; + } + } else { + mTFreq[k + 2]++; + } + } +} + +STATIC +VOID +WritePTLen ( + IN INT32 n, + IN INT32 nbit, + IN INT32 Special + ) +/*++ + +Routine Description: + + Outputs the code length array for the Extra Set or the Position Set. + +Arguments: + + n - the number of symbols + nbit - the number of bits needed to represent 'n' + Special - the special symbol that needs to be take care of + +Returns: (VOID) + +--*/ +{ + INT32 i, k; + + while (n > 0 && mPTLen[n - 1] == 0) { + n--; + } + PutBits(nbit, n); + i = 0; + while (i < n) { + k = mPTLen[i++]; + if (k <= 6) { + PutBits(3, k); + } else { + PutBits(k - 3, (1U << (k - 3)) - 2); + } + if (i == Special) { + while (i < 6 && mPTLen[i] == 0) { + i++; + } + PutBits(2, (i - 3) & 3); + } + } +} + +STATIC +VOID +WriteCLen () +/*++ + +Routine Description: + + Outputs the code length array for Char&Length Set + +Arguments: (VOID) + +Returns: (VOID) + +--*/ +{ + INT32 i, k, n, Count; + + n = NC; + while (n > 0 && mCLen[n - 1] == 0) { + n--; + } + PutBits(CBIT, n); + i = 0; + while (i < n) { + k = mCLen[i++]; + if (k == 0) { + Count = 1; + while (i < n && mCLen[i] == 0) { + i++; + Count++; + } + if (Count <= 2) { + for (k = 0; k < Count; k++) { + PutBits(mPTLen[0], mPTCode[0]); + } + } else if (Count <= 18) { + PutBits(mPTLen[1], mPTCode[1]); + PutBits(4, Count - 3); + } else if (Count == 19) { + PutBits(mPTLen[0], mPTCode[0]); + PutBits(mPTLen[1], mPTCode[1]); + PutBits(4, 15); + } else { + PutBits(mPTLen[2], mPTCode[2]); + PutBits(CBIT, Count - 20); + } + } else { + PutBits(mPTLen[k + 2], mPTCode[k + 2]); + } + } +} + +STATIC +VOID +EncodeC ( + IN INT32 c + ) +{ + PutBits(mCLen[c], mCCode[c]); +} + +STATIC +VOID +EncodeP ( + IN UINT32 p + ) +{ + UINT32 c, q; + + c = 0; + q = p; + while (q) { + q >>= 1; + c++; + } + PutBits(mPTLen[c], mPTCode[c]); + if (c > 1) { + PutBits(c - 1, p & (0xFFFFU >> (17 - c))); + } +} + +STATIC +VOID +SendBlock () +/*++ + +Routine Description: + + Huffman code the block and output it. 
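EncodeP() above never Huffman-codes a raw distance directly: it sends the distance's bit length as a symbol from the Position Set (NP = WNDBIT + 1 slots) and then the low bits verbatim, relying on the fact that the top bit of a c-bit value is always one. A standalone sketch of that split and its inverse; emit_slot() and emit_bits() are illustrative stand-ins for PutBits() and the Huffman tables, not functions in this file.

/* Position coding as in EncodeP(): send the bit length c of the
 * distance as a Huffman symbol, then the low ( c - 1 ) bits verbatim. */
static void encode_position ( unsigned int p,
			      void ( * emit_slot ) ( unsigned int slot ),
			      void ( * emit_bits ) ( unsigned int nbits,
						     unsigned int bits ) ) {
	unsigned int c = 0;
	unsigned int q = p;

	while ( q ) {			/* c = number of significant bits */
		q >>= 1;
		c++;
	}
	emit_slot ( c );
	if ( c > 1 )
		emit_bits ( ( c - 1 ), ( p & ( ( 1U << ( c - 1 ) ) - 1 ) ) );
}

/* Inverse: rebuild the distance from the slot and the extra bits. */
static unsigned int decode_position ( unsigned int c, unsigned int extra ) {
	if ( c <= 1 )
		return c;		/* slot 0 -> 0, slot 1 -> 1 */
	return ( ( 1U << ( c - 1 ) ) | extra );
}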
+ +Argument: (VOID) + +Returns: (VOID) + +--*/ +{ + UINT32 i, k, Flags, Root, Pos, Size; + Flags = 0; + + Root = MakeTree(NC, mCFreq, mCLen, mCCode); + Size = mCFreq[Root]; + PutBits(16, Size); + if (Root >= NC) { + CountTFreq(); + Root = MakeTree(NT, mTFreq, mPTLen, mPTCode); + if (Root >= NT) { + WritePTLen(NT, TBIT, 3); + } else { + PutBits(TBIT, 0); + PutBits(TBIT, Root); + } + WriteCLen(); + } else { + PutBits(TBIT, 0); + PutBits(TBIT, 0); + PutBits(CBIT, 0); + PutBits(CBIT, Root); + } + Root = MakeTree(NP, mPFreq, mPTLen, mPTCode); + if (Root >= NP) { + WritePTLen(NP, PBIT, -1); + } else { + PutBits(PBIT, 0); + PutBits(PBIT, Root); + } + Pos = 0; + for (i = 0; i < Size; i++) { + if (i % UINT8_BIT == 0) { + Flags = mBuf[Pos++]; + } else { + Flags <<= 1; + } + if (Flags & (1U << (UINT8_BIT - 1))) { + EncodeC(mBuf[Pos++] + (1U << UINT8_BIT)); + k = mBuf[Pos++] << UINT8_BIT; + k += mBuf[Pos++]; + EncodeP(k); + } else { + EncodeC(mBuf[Pos++]); + } + } + for (i = 0; i < NC; i++) { + mCFreq[i] = 0; + } + for (i = 0; i < NP; i++) { + mPFreq[i] = 0; + } +} + + +STATIC +VOID +Output ( + IN UINT32 c, + IN UINT32 p + ) +/*++ + +Routine Description: + + Outputs an Original Character or a Pointer + +Arguments: + + c - The original character or the 'String Length' element of a Pointer + p - The 'Position' field of a Pointer + +Returns: (VOID) + +--*/ +{ + STATIC UINT32 CPos; + + if ((mOutputMask >>= 1) == 0) { + mOutputMask = 1U << (UINT8_BIT - 1); + if (mOutputPos >= mBufSiz - 3 * UINT8_BIT) { + SendBlock(); + mOutputPos = 0; + } + CPos = mOutputPos++; + mBuf[CPos] = 0; + } + mBuf[mOutputPos++] = (UINT8) c; + mCFreq[c]++; + if (c >= (1U << UINT8_BIT)) { + mBuf[CPos] |= mOutputMask; + mBuf[mOutputPos++] = (UINT8)(p >> UINT8_BIT); + mBuf[mOutputPos++] = (UINT8) p; + c = 0; + while (p) { + p >>= 1; + c++; + } + mPFreq[c]++; + } +} + +STATIC +VOID +HufEncodeStart () +{ + INT32 i; + + for (i = 0; i < NC; i++) { + mCFreq[i] = 0; + } + for (i = 0; i < NP; i++) { + mPFreq[i] = 0; + } + mOutputPos = mOutputMask = 0; + InitPutBits(); + return; +} + +STATIC +VOID +HufEncodeEnd () +{ + SendBlock(); + + // + // Flush remaining bits + // + PutBits(UINT8_BIT - 1, 0); + + return; +} + + +STATIC +VOID +MakeCrcTable () +{ + UINT32 i, j, r; + + for (i = 0; i <= UINT8_MAX; i++) { + r = i; + for (j = 0; j < UINT8_BIT; j++) { + if (r & 1) { + r = (r >> 1) ^ CRCPOLY; + } else { + r >>= 1; + } + } + mCrcTable[i] = (UINT16)r; + } +} + +STATIC +VOID +PutBits ( + IN INT32 n, + IN UINT32 x + ) +/*++ + +Routine Description: + + Outputs rightmost n bits of x + +Arguments: + + n - the rightmost n bits of the data is used + x - the data + +Returns: (VOID) + +--*/ +{ + UINT8 Temp; + + if (n < mBitCount) { + mSubBitBuf |= x << (mBitCount -= n); + } else { + + Temp = (UINT8)(mSubBitBuf | (x >> (n -= mBitCount))); + if (mDst < mDstUpperLimit) { + *mDst++ = Temp; + } + mCompSize++; + + if (n < UINT8_BIT) { + mSubBitBuf = x << (mBitCount = UINT8_BIT - n); + } else { + + Temp = (UINT8)(x >> (n - UINT8_BIT)); + if (mDst < mDstUpperLimit) { + *mDst++ = Temp; + } + mCompSize++; + + mSubBitBuf = x << (mBitCount = 2 * UINT8_BIT - n); + } + } +} + +STATIC +INT32 +FreadCrc ( + OUT UINT8 *p, + IN INT32 n + ) +/*++ + +Routine Description: + + Read in source data + +Arguments: + + p - the buffer to hold the data + n - number of bytes to read + +Returns: + + number of bytes actually read + +--*/ +{ + INT32 i; + + for (i = 0; mSrc < mSrcUpperLimit && i < n; i++) { + *p++ = *mSrc++; + } + n = i; + + p -= n; + mOrigSize += n; + while (--i >= 0) { + 
UPDATE_CRC(*p++); + } + return n; +} + + +STATIC +VOID +InitPutBits () +{ + mBitCount = UINT8_BIT; + mSubBitBuf = 0; +} + +STATIC +VOID +CountLen ( + IN INT32 i + ) +/*++ + +Routine Description: + + Count the number of each code length for a Huffman tree. + +Arguments: + + i - the top node + +Returns: (VOID) + +--*/ +{ + STATIC INT32 Depth = 0; + + if (i < mN) { + mLenCnt[(Depth < 16) ? Depth : 16]++; + } else { + Depth++; + CountLen(mLeft [i]); + CountLen(mRight[i]); + Depth--; + } +} + +STATIC +VOID +MakeLen ( + IN INT32 Root + ) +/*++ + +Routine Description: + + Create code length array for a Huffman tree + +Arguments: + + Root - the root of the tree + +--*/ +{ + INT32 i, k; + UINT32 Cum; + + for (i = 0; i <= 16; i++) { + mLenCnt[i] = 0; + } + CountLen(Root); + + // + // Adjust the length count array so that + // no code will be generated longer than its designated length + // + + Cum = 0; + for (i = 16; i > 0; i--) { + Cum += mLenCnt[i] << (16 - i); + } + while (Cum != (1U << 16)) { + mLenCnt[16]--; + for (i = 15; i > 0; i--) { + if (mLenCnt[i] != 0) { + mLenCnt[i]--; + mLenCnt[i+1] += 2; + break; + } + } + Cum--; + } + for (i = 16; i > 0; i--) { + k = mLenCnt[i]; + while (--k >= 0) { + mLen[*mSortPtr++] = (UINT8)i; + } + } +} + +STATIC +VOID +DownHeap ( + IN INT32 i + ) +{ + INT32 j, k; + + // + // priority queue: send i-th entry down heap + // + + k = mHeap[i]; + while ((j = 2 * i) <= mHeapSize) { + if (j < mHeapSize && mFreq[mHeap[j]] > mFreq[mHeap[j + 1]]) { + j++; + } + if (mFreq[k] <= mFreq[mHeap[j]]) { + break; + } + mHeap[i] = mHeap[j]; + i = j; + } + mHeap[i] = (INT16)k; +} + +STATIC +VOID +MakeCode ( + IN INT32 n, + IN UINT8 Len[], + OUT UINT16 Code[] + ) +/*++ + +Routine Description: + + Assign code to each symbol based on the code length array + +Arguments: + + n - number of symbols + Len - the code length array + Code - stores codes for each symbol + +Returns: (VOID) + +--*/ +{ + INT32 i; + UINT16 Start[18]; + + Start[1] = 0; + for (i = 1; i <= 16; i++) { + Start[i + 1] = (UINT16)((Start[i] + mLenCnt[i]) << 1); + } + for (i = 0; i < n; i++) { + Code[i] = Start[Len[i]]++; + } +} + +STATIC +INT32 +MakeTree ( + IN INT32 NParm, + IN UINT16 FreqParm[], + OUT UINT8 LenParm[], + OUT UINT16 CodeParm[] + ) +/*++ + +Routine Description: + + Generates Huffman codes given a frequency distribution of symbols + +Arguments: + + NParm - number of symbols + FreqParm - frequency of each symbol + LenParm - code length for each symbol + CodeParm - code for each symbol + +Returns: + + Root of the Huffman tree. 
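MakeLen() and MakeCode() above implement canonical Huffman coding: the tree is used only to derive code lengths (clamped to 16 bits by the adjustment loop in MakeLen()), after which the actual bit patterns follow purely from how many codes exist at each length. The sketch below reproduces that second step on its own; make_canonical_codes() is an illustrative name and is intended to match MakeCode() in effect, and symbols with length zero are unused, so the code values they receive are irrelevant.

#include <stdint.h>

/* Canonical code assignment from code lengths alone: codes of the same
 * length are consecutive, and each length starts where the codes of the
 * previous length end, shifted left by one bit. */
void make_canonical_codes ( unsigned int n, const uint8_t len[],
			    uint16_t code[] ) {
	uint16_t count[17] = { 0 };	/* number of codes of each length */
	uint16_t start[18] = { 0 };	/* first code of each length */
	unsigned int i;

	for ( i = 0 ; i < n ; i++ )
		count[len[i]]++;
	for ( i = 1 ; i <= 16 ; i++ )
		start[i + 1] = ( ( start[i] + count[i] ) << 1 );
	for ( i = 0 ; i < n ; i++ )
		code[i] = start[len[i]]++;	/* length 0 = unused symbol */
}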
+ +--*/ +{ + INT32 i, j, k, Avail; + + // + // make tree, calculate len[], return root + // + + mN = NParm; + mFreq = FreqParm; + mLen = LenParm; + Avail = mN; + mHeapSize = 0; + mHeap[1] = 0; + for (i = 0; i < mN; i++) { + mLen[i] = 0; + if (mFreq[i]) { + mHeap[++mHeapSize] = (INT16)i; + } + } + if (mHeapSize < 2) { + CodeParm[mHeap[1]] = 0; + return mHeap[1]; + } + for (i = mHeapSize / 2; i >= 1; i--) { + + // + // make priority queue + // + DownHeap(i); + } + mSortPtr = CodeParm; + do { + i = mHeap[1]; + if (i < mN) { + *mSortPtr++ = (UINT16)i; + } + mHeap[1] = mHeap[mHeapSize--]; + DownHeap(1); + j = mHeap[1]; + if (j < mN) { + *mSortPtr++ = (UINT16)j; + } + k = Avail++; + mFreq[k] = (UINT16)(mFreq[i] + mFreq[j]); + mHeap[1] = (INT16)k; + DownHeap(1); + mLeft[k] = (UINT16)i; + mRight[k] = (UINT16)j; + } while (mHeapSize > 1); + + mSortPtr = CodeParm; + MakeLen(k); + MakeCode(NParm, LenParm, CodeParm); + + // + // return root + // + return k; +} + diff --git a/src/util/efirom.c b/src/util/efirom.c index 93cd79fe9..95feaf239 100644 --- a/src/util/efirom.c +++ b/src/util/efirom.c @@ -34,10 +34,17 @@ #define eprintf(...) fprintf ( stderr, __VA_ARGS__ ) +/* Round up ROM size */ +#define ROM_SIZE( len ) ( ( (len) + 511 ) & ~511 ) + +/* Include the EDK2 compression code */ +#include "eficompress.c" + /** Command-line options */ struct options { uint16_t vendor; uint16_t device; + int compress; }; /** @@ -94,6 +101,35 @@ static void read_pe_info ( void *pe, uint16_t *machine, } } +/** + * Attempt to compress EFI data in-place + * + * @v data Data to be compressed + * @v max_len Length of data + * @ret len Length after attempted compression + */ +static size_t efi_compress ( void *data, size_t max_len ) { + void *tmp; + UINT32 len; + + /* Allocate temporary buffer for compressed data */ + tmp = xmalloc ( max_len ); + + /* Attempt compression */ + len = max_len; + if ( ( EfiCompress ( data, max_len, tmp, &len ) == 0 ) && + ( len < max_len ) ) { + memcpy ( data, tmp, len ); + } else { + len = max_len; + } + + /* Free temporary buffer */ + free ( tmp ); + + return len; +} + /** * Convert EFI image to ROM image * @@ -109,10 +145,14 @@ static void make_efi_rom ( FILE *pe, FILE *rom, struct options *opts ) { struct stat pe_stat; size_t pe_size; size_t rom_size; + size_t compressed_size; void *buf; void *payload; unsigned int i; + uint16_t machine; + uint16_t subsystem; uint8_t checksum; + int compressed; /* Determine PE file size */ if ( fstat ( fileno ( pe ), &pe_stat ) != 0 ) { @@ -123,7 +163,7 @@ static void make_efi_rom ( FILE *pe, FILE *rom, struct options *opts ) { pe_size = pe_stat.st_size; /* Determine ROM file size */ - rom_size = ( ( pe_size + sizeof ( *headers ) + 511 ) & ~511 ); + rom_size = ROM_SIZE ( sizeof ( *headers ) + pe_size ); /* Allocate ROM buffer and read in PE file */ buf = xmalloc ( rom_size ); @@ -136,12 +176,26 @@ static void make_efi_rom ( FILE *pe, FILE *rom, struct options *opts ) { exit ( 1 ); } + /* Parse PE headers */ + read_pe_info ( payload, &machine, &subsystem ); + + /* Compress the image, if requested */ + if ( opts->compress ) { + compressed_size = efi_compress ( payload, pe_size ); + rom_size = ROM_SIZE ( sizeof ( *headers ) + compressed_size ); + compressed = ( compressed_size < pe_size ); + } else { + compressed = 0; + } + /* Construct ROM header */ headers->rom.Signature = PCI_EXPANSION_ROM_HEADER_SIGNATURE; headers->rom.InitializationSize = ( rom_size / 512 ); headers->rom.EfiSignature = EFI_PCI_EXPANSION_ROM_HEADER_EFISIGNATURE; - read_pe_info ( payload, 
&headers->rom.EfiMachineType, - &headers->rom.EfiSubsystem ); + headers->rom.EfiSubsystem = subsystem; + headers->rom.EfiMachineType = machine; + headers->rom.CompressionType = + ( compressed ? EFI_PCI_EXPANSION_ROM_HEADER_COMPRESSED : 0 ); headers->rom.EfiImageHeaderOffset = sizeof ( *headers ); headers->rom.PcirOffset = offsetof ( typeof ( *headers ), pci ); @@ -194,11 +248,12 @@ static int parse_options ( const int argc, char **argv, static struct option long_options[] = { { "vendor", required_argument, NULL, 'v' }, { "device", required_argument, NULL, 'd' }, + { "compress", 0, NULL, 'c' }, { "help", 0, NULL, 'h' }, { 0, 0, 0, 0 } }; - if ( ( c = getopt_long ( argc, argv, "v:d:h", + if ( ( c = getopt_long ( argc, argv, "v:d:ch", long_options, &option_index ) ) == -1 ) { break; @@ -207,18 +262,21 @@ static int parse_options ( const int argc, char **argv, switch ( c ) { case 'v': opts->vendor = strtoul ( optarg, &end, 16 ); - if ( *end ) { + if ( *end || ( ! *optarg ) ) { eprintf ( "Invalid vendor \"%s\"\n", optarg ); exit ( 2 ); } break; case 'd': opts->device = strtoul ( optarg, &end, 16 ); - if ( *end ) { + if ( *end || ( ! *optarg ) ) { eprintf ( "Invalid device \"%s\"\n", optarg ); exit ( 2 ); } break; + case 'c': + opts->compress = 1; + break; case 'h': print_help ( argv[0] ); exit ( 0 ); diff --git a/src/util/elf2efi.c b/src/util/elf2efi.c index bcd53c9af..8af53aeb4 100644 --- a/src/util/elf2efi.c +++ b/src/util/elf2efi.c @@ -38,6 +38,9 @@ #define eprintf(...) fprintf ( stderr, __VA_ARGS__ ) +#undef ELF_R_TYPE +#undef ELF_R_SYM + #ifdef EFI_TARGET32 #define EFI_IMAGE_NT_HEADERS EFI_IMAGE_NT_HEADERS32 @@ -72,21 +75,46 @@ #define ELF_MREL( mach, type ) ( (mach) | ( (type) << 16 ) ) -/* Allow for building with older versions of elf.h */ +/* Provide constants missing on some platforms */ #ifndef EM_AARCH64 #define EM_AARCH64 183 +#endif +#ifndef R_AARCH64_NONE #define R_AARCH64_NONE 0 +#endif +#ifndef R_AARCH64_NULL +#define R_AARCH64_NULL 256 +#endif +#ifndef R_AARCH64_ABS64 #define R_AARCH64_ABS64 257 +#endif +#ifndef R_AARCH64_CALL26 #define R_AARCH64_CALL26 283 +#endif +#ifndef R_AARCH64_JUMP26 #define R_AARCH64_JUMP26 282 +#endif +#ifndef R_AARCH64_ADR_PREL_LO21 #define R_AARCH64_ADR_PREL_LO21 274 +#endif +#ifndef R_AARCH64_ADR_PREL_PG_HI21 #define R_AARCH64_ADR_PREL_PG_HI21 275 +#endif +#ifndef R_AARCH64_ADD_ABS_LO12_NC #define R_AARCH64_ADD_ABS_LO12_NC 277 +#endif +#ifndef R_AARCH64_LDST8_ABS_LO12_NC #define R_AARCH64_LDST8_ABS_LO12_NC 278 +#endif +#ifndef R_AARCH64_LDST16_ABS_LO12_NC #define R_AARCH64_LDST16_ABS_LO12_NC 284 +#endif +#ifndef R_AARCH64_LDST32_ABS_LO12_NC #define R_AARCH64_LDST32_ABS_LO12_NC 285 +#endif +#ifndef R_AARCH64_LDST64_ABS_LO12_NC #define R_AARCH64_LDST64_ABS_LO12_NC 286 -#endif /* EM_AARCH64 */ +#endif #ifndef R_ARM_CALL #define R_ARM_CALL 28 #endif @@ -97,12 +125,22 @@ #define R_ARM_V4BX 40 #endif -/* Seems to be missing from elf.h */ -#ifndef R_AARCH64_NULL -#define R_AARCH64_NULL 256 -#endif +/** + * Alignment of raw data of sections in the image file + * + * Some versions of signtool.exe will spuriously complain if this + * value is less than 512. + */ +#define EFI_FILE_ALIGN 0x200 -#define EFI_FILE_ALIGN 0x20 +/** + * Alignment of sections when loaded into memory + * + * This must equal the architecture page size, in order to allow for + * the possibility of the firmware using page-level protection to + * enforce section attributes at runtime. 
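Both alignment constants feed the same power-of-two round-up used by efi_file_align() and repeated for pages by the new efi_image_align() helper: raw section data is padded to EFI_FILE_ALIGN in the file, while the loaded image advances in whole EFI_IMAGE_ALIGN pages, which is why SizeOfImage must now grow by the memory-aligned size rather than the file-aligned size. A standalone illustration of the arithmetic, using a hypothetical 0x234-byte section payload:

#include <assert.h>
#include <stddef.h>

#define EFI_FILE_ALIGN  0x200	/* alignment of raw data in the file */
#define EFI_IMAGE_ALIGN 0x1000	/* alignment of sections in memory */

/* Round up to a power-of-two alignment (the same expression used by
 * efi_file_align() and efi_image_align()). */
static size_t align_up ( size_t offset, size_t align ) {
	return ( ( offset + align - 1 ) & ~( align - 1 ) );
}

int main ( void ) {
	size_t raw = 0x234;	/* hypothetical section payload size */

	/* The file on disk grows by two 512-byte units... */
	assert ( align_up ( raw, EFI_FILE_ALIGN ) == 0x400 );
	/* ...but the loaded image, and hence SizeOfImage, grows by a
	 * whole page now that SectionAlignment is EFI_IMAGE_ALIGN. */
	assert ( align_up ( raw, EFI_IMAGE_ALIGN ) == 0x1000 );
	return 0;
}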
+ */ +#define EFI_IMAGE_ALIGN 0x1000 struct elf_file { void *data; @@ -150,9 +188,9 @@ static struct pe_header efi_pe_header = { .Magic = EFI_IMAGE_NT_OPTIONAL_HDR_MAGIC, .MajorLinkerVersion = 42, .MinorLinkerVersion = 42, - .SectionAlignment = EFI_FILE_ALIGN, + .SectionAlignment = EFI_IMAGE_ALIGN, .FileAlignment = EFI_FILE_ALIGN, - .SizeOfImage = sizeof ( efi_pe_header ), + .SizeOfImage = EFI_IMAGE_ALIGN, .SizeOfHeaders = sizeof ( efi_pe_header ), .NumberOfRvaAndSizes = EFI_IMAGE_NUMBER_OF_DIRECTORY_ENTRIES, @@ -193,6 +231,16 @@ static unsigned long efi_file_align ( unsigned long offset ) { return ( ( offset + EFI_FILE_ALIGN - 1 ) & ~( EFI_FILE_ALIGN - 1 ) ); } +/** + * Align section within PE image + * + * @v offset Unaligned offset + * @ret aligned_offset Aligned offset + */ +static unsigned long efi_image_align ( unsigned long offset ) { + return ( ( offset + EFI_IMAGE_ALIGN - 1 ) & ~( EFI_IMAGE_ALIGN - 1 ) ); +} + /** * Generate entry in PE relocation table * @@ -582,7 +630,7 @@ static struct pe_section * process_section ( struct elf_file *elf, pe_header->nt.FileHeader.NumberOfSections++; pe_header->nt.OptionalHeader.SizeOfHeaders += sizeof ( new->hdr ); pe_header->nt.OptionalHeader.SizeOfImage = - efi_file_align ( data_end ); + efi_image_align ( data_end ); return new; } @@ -705,13 +753,15 @@ static struct pe_section * create_reloc_section ( struct pe_header *pe_header, struct pe_relocs *pe_reltab ) { struct pe_section *reloc; + size_t section_rawsz; size_t section_memsz; size_t section_filesz; EFI_IMAGE_DATA_DIRECTORY *relocdir; /* Allocate PE section */ - section_memsz = output_pe_reltab ( pe_reltab, NULL ); - section_filesz = efi_file_align ( section_memsz ); + section_rawsz = output_pe_reltab ( pe_reltab, NULL ); + section_filesz = efi_file_align ( section_rawsz ); + section_memsz = efi_image_align ( section_rawsz ); reloc = xmalloc ( sizeof ( *reloc ) + section_filesz ); memset ( reloc, 0, sizeof ( *reloc ) + section_filesz ); @@ -722,6 +772,7 @@ create_reloc_section ( struct pe_header *pe_header, reloc->hdr.VirtualAddress = pe_header->nt.OptionalHeader.SizeOfImage; reloc->hdr.SizeOfRawData = section_filesz; reloc->hdr.Characteristics = ( EFI_IMAGE_SCN_CNT_INITIALIZED_DATA | + EFI_IMAGE_SCN_MEM_DISCARDABLE | EFI_IMAGE_SCN_MEM_NOT_PAGED | EFI_IMAGE_SCN_MEM_READ ); @@ -731,11 +782,11 @@ create_reloc_section ( struct pe_header *pe_header, /* Update file header details */ pe_header->nt.FileHeader.NumberOfSections++; pe_header->nt.OptionalHeader.SizeOfHeaders += sizeof ( reloc->hdr ); - pe_header->nt.OptionalHeader.SizeOfImage += section_filesz; + pe_header->nt.OptionalHeader.SizeOfImage += section_memsz; relocdir = &(pe_header->nt.OptionalHeader.DataDirectory [EFI_IMAGE_DIRECTORY_ENTRY_BASERELOC]); relocdir->VirtualAddress = reloc->hdr.VirtualAddress; - relocdir->Size = reloc->hdr.Misc.VirtualSize; + relocdir->Size = section_rawsz; return reloc; } @@ -773,8 +824,8 @@ create_debug_section ( struct pe_header *pe_header, const char *filename ) { } *contents; /* Allocate PE section */ - section_memsz = sizeof ( *contents ); - section_filesz = efi_file_align ( section_memsz ); + section_memsz = efi_image_align ( sizeof ( *contents ) ); + section_filesz = efi_file_align ( sizeof ( *contents ) ); debug = xmalloc ( sizeof ( *debug ) + section_filesz ); memset ( debug, 0, sizeof ( *debug ) + section_filesz ); contents = ( void * ) debug->contents; @@ -786,6 +837,7 @@ create_debug_section ( struct pe_header *pe_header, const char *filename ) { debug->hdr.VirtualAddress = 
pe_header->nt.OptionalHeader.SizeOfImage; debug->hdr.SizeOfRawData = section_filesz; debug->hdr.Characteristics = ( EFI_IMAGE_SCN_CNT_INITIALIZED_DATA | + EFI_IMAGE_SCN_MEM_DISCARDABLE | EFI_IMAGE_SCN_MEM_NOT_PAGED | EFI_IMAGE_SCN_MEM_READ ); debug->fixup = fixup_debug_section; @@ -805,7 +857,7 @@ create_debug_section ( struct pe_header *pe_header, const char *filename ) { /* Update file header details */ pe_header->nt.FileHeader.NumberOfSections++; pe_header->nt.OptionalHeader.SizeOfHeaders += sizeof ( debug->hdr ); - pe_header->nt.OptionalHeader.SizeOfImage += section_filesz; + pe_header->nt.OptionalHeader.SizeOfImage += section_memsz; debugdir = &(pe_header->nt.OptionalHeader.DataDirectory [EFI_IMAGE_DIRECTORY_ENTRY_DEBUG]); debugdir->VirtualAddress = debug->hdr.VirtualAddress; @@ -996,7 +1048,7 @@ static int parse_options ( const int argc, char **argv, switch ( c ) { case 's': opts->subsystem = strtoul ( optarg, &end, 0 ); - if ( *end ) { + if ( *end || ( ! *optarg ) ) { eprintf ( "Invalid subsytem \"%s\"\n", optarg ); exit ( 2 ); diff --git a/src/util/genefidsk b/src/util/genefidsk deleted file mode 100755 index 7064f99b6..000000000 --- a/src/util/genefidsk +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/sh -# -# Generate an EFI bootable disk image - -set -e - -function help() { - echo "Usage: ${0} [OPTIONS] " - echo - echo "where OPTIONS are:" - echo " -h Show this help" - echo " -b Specify boot file name (e.g. bootx64.efi)" - echo " -o FILE Save disk image to file" -} - -BOOT=bootx64.efi - -while getopts "hb:o:" opt; do - case ${opt} in - h) - help - exit 0 - ;; - b) - BOOT="${OPTARG}" - ;; - o) - OUT="${OPTARG}" - ;; - esac -done - -shift $((OPTIND - 1)) -IN=$1 - -if [ -z "${IN}" ]; then - echo "${0}: no input file given" >&2 - help - exit 1 -fi - -if [ -z "${OUT}" ]; then - echo "${0}: no output file given" >&2 - help - exit 1 -fi - -# Create sparse output file -rm -f ${OUT} -truncate -s 1440K ${OUT} - -# Format disk -mformat -i ${OUT} -f 1440 :: - -# Create directory structure -mmd -i ${OUT} ::efi -mmd -i ${OUT} ::efi/boot - -# Copy bootable image -mcopy -i ${OUT} ${IN} ::efi/boot/${BOOT} diff --git a/src/util/genfsimg b/src/util/genfsimg new file mode 100755 index 000000000..c13158203 --- /dev/null +++ b/src/util/genfsimg @@ -0,0 +1,320 @@ +#!/bin/sh +# +# Generate a filesystem-based image + +set -e +set -u + +# Print usage message +# +help() { + echo "usage: ${0} [OPTIONS] foo.lkrn|foo.efi [bar.lkrn|bar.efi,...]" + echo + echo "where OPTIONS are:" + echo " -h show this help" + echo " -o FILE save image to file" + echo " -p PAD pad filesystem (in kB)" + echo " -s SCRIPT use executable script" +} + +# Get hex byte from binary file +# +get_byte() { + local FILENAME + local OFFSET + + FILENAME="${1}" + OFFSET="${2}" + + od -j "${OFFSET}" -N 1 -A n -t x1 -- "${FILENAME}" | tr -d " " +} + +# Get hex word from binary file +# +get_word() { + local FILENAME + local OFFSET + + FILENAME="${1}" + OFFSET="${2}" + + od -j "${OFFSET}" -N 2 -A n -t x1 -- "${FILENAME}" | tr -d " " +} + +# Get appropriate EFI boot filename for CPU architecture +# +efi_boot_name() { + local FILENAME + local PESIG + local ARCH + + FILENAME="${1}" + + MZSIG=$(get_word "${FILENAME}" 0) + if [ "${MZSIG}" != "4d5a" ] ; then + echo "${FILENAME}: invalid MZ header" >&2 + exit 1 + fi + PEOFF=$(get_byte "${FILENAME}" 0x3c) + PESIG=$(get_word "${FILENAME}" 0x${PEOFF}) + if [ "${PESIG}" != "5045" ] ; then + echo "${FILENAME}: invalid PE header" >&2 + exit 1 + fi + ARCH=$(get_word "${FILENAME}" $(( 0x${PEOFF} + 4 )) ) + case 
"${ARCH}" in + "4c01" ) + echo "BOOTIA32.EFI" + ;; + "6486" ) + echo "BOOTX64.EFI" + ;; + "c201" ) + echo "BOOTARM.EFI" + ;; + "64aa" ) + echo "BOOTAA64.EFI" + ;; + * ) + echo "${FILENAME}: unrecognised EFI architecture ${ARCH}" >&2 + exit 1 + esac +} + +# Copy syslinux file +# +copy_syslinux_file() { + local FILENAME + local DESTDIR + local SRCDIR + + FILENAME="${1}" + DESTDIR="${2}" + + for SRCDIR in \ + /usr/lib/syslinux \ + /usr/lib/syslinux/bios \ + /usr/lib/syslinux/modules/bios \ + /usr/share/syslinux \ + /usr/share/syslinux/bios \ + /usr/share/syslinux/modules/bios \ + /usr/local/share/syslinux \ + /usr/local/share/syslinux/bios \ + /usr/local/share/syslinux/bios/core \ + /usr/local/share/syslinux/bios/com32/elflink/ldlinux \ + /usr/local/share/syslinux/modules/bios \ + /usr/lib/ISOLINUX \ + ; do + if [ -e "${SRCDIR}/${FILENAME}" ] ; then + install -m 644 "${SRCDIR}/${FILENAME}" "${DESTDIR}/" + return 0 + fi + done + echo "${0}: could not find ${FILENAME}" >&2 + return 1 +} + +# Parse command-line options +# +OUTFILE= +PAD=0 +SCRIPT= +while getopts "hlo:p:s:" OPTION ; do + case "${OPTION}" in + h) + help + exit 0 + ;; + o) + OUTFILE="${OPTARG}" + ;; + p) + PAD="${OPTARG}" + ;; + s) + SCRIPT="${OPTARG}" + ;; + *) + help + exit 1 + ;; + esac +done +if [ -z "${OUTFILE}" ]; then + echo "${0}: no output file given" >&2 + help + exit 1 +fi +shift $(( OPTIND - 1 )) +if [ $# -eq 0 ] ; then + echo "${0}: no input files given" >&2 + help + exit 1 +fi + +# Create temporary working directory +# +WORKDIR=$(mktemp -d "${OUTFILE}.XXXXXX") +ISODIR="${WORKDIR}/iso" +FATDIR="${WORKDIR}/fat" +mkdir -p "${ISODIR}" "${FATDIR}" + +# Configure output +# +case "${OUTFILE}" in + *.iso) + ISOIMG="${OUTFILE}" + FATIMG="${ISODIR}/esp.img" + BIOSDIR="${ISODIR}" + SYSLINUXCFG="${ISODIR}/isolinux.cfg" + ;; + *) + ISOIMG= + FATIMG="${OUTFILE}" + BIOSDIR="${FATDIR}" + SYSLINUXCFG="${FATDIR}/syslinux.cfg" + ;; +esac + +# Copy files to temporary working directory +# +LKRN= +EFI= +for FILENAME ; do + case "${FILENAME}" in + *.lkrn) + DESTDIR="${BIOSDIR}" + DESTFILE=$(basename "${FILENAME}") + if [ -z "${LKRN}" ] ; then + echo "SAY iPXE boot image" > "${SYSLINUXCFG}" + echo "TIMEOUT 30" >> "${SYSLINUXCFG}" + echo "DEFAULT ${DESTFILE}" >> "${SYSLINUXCFG}" + if [ -n "${SCRIPT}" ] ; then + cp "${SCRIPT}" "${BIOSDIR}/autoexec.ipxe" + fi + fi + echo "LABEL ${DESTFILE}" >> "${SYSLINUXCFG}" + echo " KERNEL ${DESTFILE}" >> "${SYSLINUXCFG}" + if [ -n "${SCRIPT}" ] ; then + echo " APPEND initrd=autoexec.ipxe" >> "${SYSLINUXCFG}" + fi + LKRN=1 + ;; + *.efi) + DESTDIR="${FATDIR}/EFI/BOOT" + DESTFILE=$(efi_boot_name "${FILENAME}") + if [ -z "${EFI}" ] ; then + mkdir -p "${DESTDIR}" + if [ -n "${SCRIPT}" ] ; then + cp "${SCRIPT}" "${FATDIR}/autoexec.ipxe" + fi + fi + EFI=1 + ;; + *) + echo "${0}: unrecognised input filename ${FILENAME}" >&2 + help + exit 1 + ;; + esac + if [ -e "${DESTDIR}/${DESTFILE}" ] ; then + echo "${0}: duplicate ${DESTFILE} from ${FILENAME}" >&2 + exit 1 + fi + cp "${FILENAME}" "${DESTDIR}/${DESTFILE}" +done + +# Configure ISO image, if applicable +# +# Note that the BIOS boot files are required even for an EFI-only ISO, +# since isohybrid will refuse to work without them. 
+# +if [ -n "${ISOIMG}" ] ; then + ISOARGS="-J -R -l" + copy_syslinux_file "isolinux.bin" "${ISODIR}" + copy_syslinux_file "ldlinux.c32" "${ISODIR}" 2>/dev/null || true + ISOARGS="${ISOARGS} -no-emul-boot -eltorito-boot isolinux.bin" + ISOARGS="${ISOARGS} -boot-load-size 4 -boot-info-table" + if [ -n "${EFI}" ] ; then + ISOARGS="${ISOARGS} -eltorito-alt-boot -no-emul-boot -e esp.img" + else + FATIMG= + fi + if [ -n "${SOURCE_DATE_EPOCH:-}" ] ; then + DATE_FMT="+%Y%m%d%H%M%S00" + BUILD_DATE=$(date -u -d "@${SOURCE_DATE_EPOCH}" "${DATE_FMT}" \ + 2>/dev/null || \ + date -u -r "${SOURCE_DATE_EPOCH}" "${DATE_FMT}" \ + 2>/dev/null || \ + date -u "${DATE_FMT}") + ISOARGS="${ISOARGS} --set_all_file_dates ${BUILD_DATE}" + ISOARGS="${ISOARGS} --modification-date=${BUILD_DATE}" + fi +fi + +# Create FAT filesystem image, if applicable +# +if [ -n "${FATIMG}" ] ; then + FATSIZE=$(du -s -k "${FATDIR}" | cut -f1) + FATSIZE=$(( FATSIZE + PAD + 256 )) + touch "${FATIMG}" + if [ "${FATSIZE}" -le "1440" ] ; then + FATSIZE=1440 + FATARGS="-f 1440" + else + FATCYLS=$(( ( FATSIZE + 503 ) / 504 )) + FATSIZE=$(( FATCYLS * 504 )) + FATARGS="-s 63 -h 16 -t ${FATCYLS}" + fi + truncate -s "${FATSIZE}K" "${FATIMG}" + mformat -v iPXE -i "${FATIMG}" ${FATARGS} :: + mcopy -i "${FATIMG}" -s "${FATDIR}"/* :: + if [ "${BIOSDIR}" = "${FATDIR}" ] ; then + syslinux "${FATIMG}" + fi +fi + +# Create ISO filesystem image, if applicable +# +if [ -n "${ISOIMG}" ] ; then + MKISOFS= + MKISOFS_MISSING= + MKISOFS_NOTSUP= + for CMD in genisoimage mkisofs xorrisofs ; do + if ! "${CMD}" --version >/dev/null 2>&1 ; then + MKISOFS_MISSING="${MKISOFS_MISSING} ${CMD}" + continue + fi + if ! "${CMD}" ${ISOARGS} --version "${ISODIR}" >/dev/null 2>&1 ; then + MKISOFS_NOTSUP="${MKISOFS_NOTSUP} ${CMD}" + continue + fi + MKISOFS="${CMD}" + break + done + if [ -z "${MKISOFS}" ] ; then + if [ -n "${MKISOFS_MISSING}" ] ; then + echo "${0}:${MKISOFS_MISSING}: not installed" >&2 + fi + if [ -n "${MKISOFS_NOTSUP}" ] ; then + echo "${0}:${MKISOFS_NOTSUP}: cannot handle ${ISOARGS}" >&2 + fi + echo "${0}: cannot find a suitable mkisofs or equivalent" >&2 + exit 1 + fi + "${MKISOFS}" -quiet -volid "iPXE" -preparer "iPXE build system" \ + -appid "iPXE - Open Source Network Boot Firmware" \ + -publisher "ipxe.org" -sysid "iPXE" -o "${ISOIMG}" \ + ${ISOARGS} "${ISODIR}" + if isohybrid --version >/dev/null 2>&1 ; then + ISOHYBRIDARGS= + if [ -n "${SOURCE_DATE_EPOCH:-}" ] ; then + ISOHYBRIDARGS="${ISOHYBRIDARGS} --id ${SOURCE_DATE_EPOCH}" + fi + isohybrid ${ISOHYBRIDARGS} "${ISOIMG}" + fi +fi + +# Clean up temporary working directory +# +rm -rf "${WORKDIR}" diff --git a/src/util/geniso b/src/util/geniso deleted file mode 100755 index ff090d4a0..000000000 --- a/src/util/geniso +++ /dev/null @@ -1,141 +0,0 @@ -#!/bin/bash -# -# Generate a isolinux ISO boot image - -function help() { - echo "usage: ${0} [OPTIONS] foo.lkrn [bar.lkrn,...]" - echo - echo "where OPTIONS are:" - echo " -h show this help" - echo " -l build legacy image with floppy emulation" - echo " -o FILE save iso image to file" -} - -LEGACY=0 -FIRST="" - -while getopts "hlo:" opt; do - case ${opt} in - h) - help - exit 0 - ;; - l) - LEGACY=1 - ;; - o) - OUT="${OPTARG}" - ;; - esac -done - -shift $((OPTIND - 1)) - -if [ -z "${OUT}" ]; then - echo "${0}: no output file given" >&2 - help - exit 1 -fi - -# There should either be mkisofs or the compatible genisoimage program -for command in genisoimage mkisofs; do - if ${command} --version >/dev/null 2>/dev/null; then - mkisofs=(${command}) - break - fi 
-done - -if [ -z "${mkisofs}" ]; then - echo "${0}: mkisofs or genisoimage not found, please install or set PATH" >&2 - exit 1 -fi - -dir=$(mktemp -d bin/iso.dir.XXXXXX) -cfg=${dir}/isolinux.cfg - -mkisofs+=(-quiet -l -volid "iPXE" -preparer "iPXE build system" - -appid "iPXE ${VERSION} - Open Source Network Boot Firmware" - -publisher "http://ipxe.org/" -c boot.cat) - -# generate the config -cat > ${cfg} <&2 - continue - fi - b=$(basename ${f}) - g=${b%.lkrn} - g=${g//[^a-z0-9]} - g=${g:0:8}.krn - case "${FIRST}" in - "") - echo "DEFAULT ${b}" - FIRST=${g} - ;; - esac - echo "LABEL ${b}" - echo " KERNEL ${g}" - cp ${f} ${dir}/${g} -done >> ${cfg} - -case "${LEGACY}" in - 1) - # check for mtools - case "$(mtools -V)" in - Mtools\ version\ 3.9.9*|Mtools\ version\ 3.9.1[0-9]*|[mM]tools\ *\ [4-9].*) - ;; - *) - echo "Mtools version 3.9.9 or later is required" >&2 - exit 1 - ;; - esac - - # generate floppy image - img=${dir}/boot.img - mformat -f 1440 -C -i ${img} :: - - # copy lkrn file to floppy image - for f in ${dir}/*.krn; do - mcopy -m -i ${img} ${f} ::$(basename ${g}) - rm -f ${f} - done - - # copy config file to floppy image - mcopy -i ${img} ${cfg} ::syslinux.cfg - rm -f ${cfg} - - # write syslinux bootloader to floppy image - if ! syslinux ${img}; then - echo "${0}: failed writing syslinux to floppy image ${img}" >&2 - exit 1 - fi - - # generate the iso image - "${mkisofs[@]}" -b boot.img -output ${OUT} ${dir} - ;; - 0) - # copy isolinux bootloader - cp ${ISOLINUX_BIN} ${dir} - - # syslinux 6.x needs a file called ldlinux.c32 - if [ -n "${LDLINUX_C32}" -a -s "${LDLINUX_C32}" ]; then - cp ${LDLINUX_C32} ${dir} - fi - - # generate the iso image - "${mkisofs[@]}" -b isolinux.bin -no-emul-boot -boot-load-size 4 -boot-info-table -output ${OUT} ${dir} - - # isohybrid will be used if available - if isohybrid --version >/dev/null 2>/dev/null; then - isohybrid ${OUT} >/dev/null - fi - ;; -esac - -# clean up temporary dir -rm -fr ${dir} diff --git a/src/util/gensdsk b/src/util/gensdsk deleted file mode 100755 index 9e8361d49..000000000 --- a/src/util/gensdsk +++ /dev/null @@ -1,65 +0,0 @@ -#!/bin/bash -# -# Generate a syslinux floppy that loads a iPXE image -# -# gensdsk foo.sdsk foo.lkrn -# -# the floppy image is the first argument -# followed by list of .lkrn images -# - -case $# in -0|1) - echo Usage: $0 foo.sdsk foo.lkrn ... - exit 1 - ;; -esac -case "`mtools -V`" in -Mtools\ version\ 3.9.9*|Mtools\ version\ 3.9.1[0-9]*|[mM]tools\ *\ [4-9].*) - ;; -*) - echo Mtools version 3.9.9 or later is required - exit 1 - ;; -esac -img=$1 -shift -dir=`mktemp -d bin/sdsk.dir.XXXXXX` - -mformat -f 1440 -C -i $img :: -cfg=$dir/syslinux.cfg -cat > $cfg <&2 - continue - fi - # shorten name for 8.3 filesystem - b=$(basename $f) - g=${b%.lkrn} - g=${g//[^a-z0-9]} - g=${g:0:8}.krn - case "$first" in - "") - echo DEFAULT $g - ;; - esac - first=$g - echo LABEL $b - echo "" KERNEL $g - mcopy -m -i $img $f ::$g -done >> $cfg -mcopy -i $img $cfg ::syslinux.cfg -if ! syslinux $img -then - exit 1 -fi - -rm -fr $dir diff --git a/src/util/iccfix.c b/src/util/iccfix.c deleted file mode 100644 index 528bf4b26..000000000 --- a/src/util/iccfix.c +++ /dev/null @@ -1,157 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define DEBUG 0 - -#define eprintf(...) fprintf ( stderr, __VA_ARGS__ ) - -#define dprintf(...) 
do { \ - if ( DEBUG ) \ - fprintf ( stderr, __VA_ARGS__ ); \ - } while ( 0 ) - -#ifdef SELF_INCLUDED - -/** - * Fix up ICC alignments - * - * @v elf ELF header - * @ret rc Return status code - * - * See comments in tables.h for an explanation of why this monstrosity - * is necessary. - */ -static int ICCFIX ( void *elf ) { - ELF_EHDR *ehdr = elf; - ELF_SHDR *shdr = ( elf + ehdr->e_shoff ); - size_t shentsize = ehdr->e_shentsize; - unsigned int shnum = ehdr->e_shnum; - ELF_SHDR *strtab = ( ( ( void * ) shdr ) + - ( ehdr->e_shstrndx * shentsize ) ); - char *strings = ( elf + strtab->sh_offset ); - - for ( ; shnum-- ; shdr = ( ( ( void * ) shdr ) + shentsize ) ) { - char *name = ( strings + shdr->sh_name ); - unsigned long align = shdr->sh_addralign; - unsigned long new_align; - - if ( ( strncmp ( name, ".tbl.", 5 ) == 0 ) && - ( align >= ICC_ALIGN_HACK_FACTOR ) ) { - new_align = ( align / ICC_ALIGN_HACK_FACTOR ); - shdr->sh_addralign = new_align; - dprintf ( "Section \"%s\": alignment %ld->%ld\n", - name, align, new_align ); - } - } - return 0; -} - -#else /* SELF_INCLUDED */ - -#define SELF_INCLUDED - -/* Include iccfix32() function */ -#define ELF_EHDR Elf32_Ehdr -#define ELF_SHDR Elf32_Shdr -#define ICCFIX iccfix32 -#include "iccfix.c" -#undef ELF_EHDR -#undef ELF_SHDR -#undef ICCFIX - -/* Include iccfix64() function */ -#define ELF_EHDR Elf64_Ehdr -#define ELF_SHDR Elf64_Shdr -#define ICCFIX iccfix64 -#include "iccfix.c" -#undef ELF_EHDR -#undef ELF_SHDR -#undef ICCFIX - -static int iccfix ( const char *filename ) { - int fd; - struct stat stat; - void *elf; - unsigned char *eident; - int rc; - - /* Open and mmap file */ - fd = open ( filename, O_RDWR ); - if ( fd < 0 ) { - eprintf ( "Could not open %s: %s\n", - filename, strerror ( errno ) ); - rc = -1; - goto err_open; - } - if ( fstat ( fd, &stat ) < 0 ) { - eprintf ( "Could not determine size of %s: %s\n", - filename, strerror ( errno ) ); - rc = -1; - goto err_fstat; - } - elf = mmap ( NULL, stat.st_size, ( PROT_READ | PROT_WRITE ), - MAP_SHARED, fd, 0 ); - if ( elf == MAP_FAILED ) { - eprintf ( "Could not map %s: %s\n", - filename, strerror ( errno ) ); - rc = -1; - goto err_mmap; - } - - /* Perform fixups */ - eident = elf; - switch ( eident[EI_CLASS] ) { - case ELFCLASS32: - rc = iccfix32 ( elf ); - break; - case ELFCLASS64: - rc = iccfix64 ( elf ); - break; - default: - eprintf ( "Unknown ELF class %d in %s\n", - eident[EI_CLASS], filename ); - rc = -1; - break; - } - - munmap ( elf, stat.st_size ); - err_mmap: - err_fstat: - close ( fd ); - err_open: - return rc; -} - -int main ( int argc, char **argv ) { - int i; - int rc; - - /* Parse command line */ - if ( argc < 2 ) { - eprintf ( "Syntax: %s ...\n", argv[0] ); - exit ( 1 ); - } - - /* Process each object in turn */ - for ( i = 1 ; i < argc ; i++ ) { - if ( ( rc = iccfix ( argv[i] ) ) != 0 ) { - eprintf ( "Could not fix up %s\n", argv[i] ); - exit ( 1 ); - } - } - - return 0; -} - -#endif /* SELF_INCLUDED */