--- /dev/null
+78729af5bc58d0dab74128fba27f21d6bf851c83 304
+78729af5bc58d0dab74128fba27f21d6bf851c83 default
--- /dev/null
+[paths]
+default = ssh://software.sandia.gov//home/ktpedre/kitten
--- /dev/null
+revlogv1
+store
--- /dev/null
+# use glob syntax.
+syntax: glob
+
+*.o
+*.cmd
+*.out
+vmlwk
+vmlwk.*
+System.map
+*.swp
+.config
+.config.old
+.kconfig.d
+.version
+.kernelrelease
+patch
+out
+lib.a
+*.iso
+arch/x86_64/boot/bootsect
+arch/x86_64/boot/bzImage
+arch/x86_64/boot/setup
+arch/x86_64/boot/tools/build
+arch/x86_64/kernel/asm-offsets.s
+bochs/bochsout.txt
+include/arch-x86_64/asm-offsets.h
+include/lwk/autoconf.h
+include/lwk/compile.h
+include/lwk/version.h
+scripts/basic/docproc
+scripts/basic/fixdep
+scripts/basic/split-include
+scripts/kconfig/conf
+scripts/kconfig/lex.zconf.c
+scripts/kconfig/zconf.hash.c
+scripts/kconfig/zconf.tab.c
+scripts/kallsyms
+.tmp_*
+include/arch
+init_task
+user/hello_world/hello_world
+user/liblwk/liblwk.a
+
+
+syntax: regexp
+^include/config
--- /dev/null
+Kitten is derived from the Linux kernel and is licensed under the same
+GPL v2 license as the Linux kernel. The full Linux COPYING file is
+listed below and applies to the Kitten kernel. Most of the user space
+programs and libraries in the /user directory are licensed under the
+LGPL (each directory should have its own COPYING file).
+
+Files containing:
+
+ /* Copyright (c) 2008, Sandia National Laboratories */
+
+or similar (with the year(s) changed) have either been written solely by
+Sandia National Laboratories or been significantly modified from the
+source code from which they are derived. When a file is derived from other
+source code, the original copyright headers and license are maintained.
+
+----------------------------------------
+
+ NOTE! This copyright does *not* cover user programs that use kernel
+ services by normal system calls - this is merely considered normal use
+ of the kernel, and does *not* fall under the heading of "derived work".
+ Also note that the GPL below is copyrighted by the Free Software
+ Foundation, but the instance of code that it refers to (the Linux
+ kernel) is copyrighted by me and others who actually wrote it.
+
+ Also note that the only valid version of the GPL as far as the kernel
+ is concerned is _this_ particular version of the license (ie v2, not
+ v2.2 or v3.x or whatever), unless explicitly otherwise stated.
+
+ Linus Torvalds
+
+----------------------------------------
+
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+\f
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+\f
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+\f
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+\f
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+\f
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General
+Public License instead of this License.
--- /dev/null
+#
+# Kbuild for top-level directory of the kernel
+# This file takes care of the following:
+# 1) Generate asm-offsets.h
+
+#####
+# 1) Generate asm-offsets.h
+#
+
+offsets-file := include/arch-$(ARCH)/asm-offsets.h
+
+always := $(offsets-file)
+targets := $(offsets-file)
+targets += arch/$(ARCH)/kernel/asm-offsets.s
+
+# Default sed regexp - multiline due to syntax constraints
+define sed-y
+ "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
+endef
+# Override default regexp for specific architectures
+sed-$(CONFIG_MIPS) := "/^@@@/{s/^@@@//; s/ \#.*\$$//; p;}"
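+# As a rough illustration (hypothetical symbol name), an asm-offsets.s line
+# such as:
+#   ->TASK_FLAGS $8 offsetof(struct task_struct, flags)
+# would be rewritten by the default sed-y expression above into:
+#   #define TASK_FLAGS 8 /* offsetof(struct task_struct, flags) */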
+
+quiet_cmd_offsets = GEN $@
+define cmd_offsets
+ (set -e; \
+ echo "#ifndef __ASM_OFFSETS_H__"; \
+ echo "#define __ASM_OFFSETS_H__"; \
+ echo "/*"; \
+ echo " * DO NOT MODIFY."; \
+ echo " *"; \
+ echo " * This file was generated by $(srctree)/Kbuild"; \
+ echo " *"; \
+ echo " */"; \
+ echo ""; \
+ sed -ne $(sed-y) $<; \
+ echo ""; \
+ echo "#endif" ) > $@
+endef
+
+# We use internal kbuild rules to avoid the "is up to date" message from make
+arch/$(ARCH)/kernel/asm-offsets.s: arch/$(ARCH)/kernel/asm-offsets.c FORCE
+ $(Q)mkdir -p $(dir $@)
+ $(call if_changed_dep,cc_s_c)
+
+$(obj)/$(offsets-file): arch/$(ARCH)/kernel/asm-offsets.s Kbuild
+ $(Q)mkdir -p $(dir $@)
+ $(call cmd,offsets)
+
--- /dev/null
+VERSION = 1
+PATCHLEVEL = 0
+SUBLEVEL = 0
+EXTRAVERSION = Kitten
+NAME=Kitten
+
+
+# *DOCUMENTATION*
+# To see a list of typical targets execute "make help"
+# More info can be located in ./README
+# Comments in this file are targeted only at developers; do not
+# expect to learn how to build the kernel by reading this file.
+
+# Do not print "Entering directory ..."
+MAKEFLAGS += --no-print-directory
+
+# We are using a recursive build, so we need to do a little thinking
+# to get the ordering right.
+#
+# Most importantly: sub-Makefiles should only ever modify files in
+# their own directory. If in some directory we have a dependency on
+# a file in another dir (which doesn't happen often, but it's often
+# unavoidable when linking the built-in.o targets which finally
+# turn into vmlwk), we will call a sub make in that other dir, and
+# after that we are sure that everything which is in that other dir
+# is now up to date.
+#
+# The only cases where we need to modify files which have global
+# effects are thus separated out and done before the recursive
+# descending is started. They are now explicitly listed as the
+# prepare rule.
+
+# To put more focus on warnings, be less verbose by default
+# Use 'make V=1' to see the full commands
+
+ifdef V
+ ifeq ("$(origin V)", "command line")
+ KBUILD_VERBOSE = $(V)
+ endif
+endif
+ifndef KBUILD_VERBOSE
+ KBUILD_VERBOSE = 0
+endif
+
+# Call sparse as part of compilation of C files
+# Use 'make C=1' to enable sparse checking
+
+ifdef C
+ ifeq ("$(origin C)", "command line")
+ KBUILD_CHECKSRC = $(C)
+ endif
+endif
+ifndef KBUILD_CHECKSRC
+ KBUILD_CHECKSRC = 0
+endif
+
+# Use make M=dir to specify directory of external module to build
+# Old syntax make ... SUBDIRS=$PWD is still supported
+# Setting the environment variable KBUILD_EXTMOD takes precedence
+ifdef SUBDIRS
+ KBUILD_EXTMOD ?= $(SUBDIRS)
+endif
+ifdef M
+ ifeq ("$(origin M)", "command line")
+ KBUILD_EXTMOD := $(M)
+ endif
+endif
+
+
+# kbuild supports saving output files in a separate directory.
+# To locate output files in a separate directory two syntaxes are supported.
+# In both cases the working directory must be the root of the kernel src.
+# 1) O=
+# Use "make O=dir/to/store/output/files/"
+#
+# 2) Set KBUILD_OUTPUT
+# Set the environment variable KBUILD_OUTPUT to point to the directory
+# where the output files shall be placed.
+# export KBUILD_OUTPUT=dir/to/store/output/files/
+# make
+#
+# The O= assignment takes precedence over the KBUILD_OUTPUT environment
+# variable.
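+# For example (illustrative paths): with KBUILD_OUTPUT=/tmp/kitten-obj set in
+# the environment, "make O=build" still places all output under ./build,
+# because the O= assignment overrides KBUILD_OUTPUT.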
+
+
+# KBUILD_SRC is set on invocation of make in OBJ directory
+# KBUILD_SRC is not intended to be used by the regular user (for now)
+ifeq ($(KBUILD_SRC),)
+
+# OK, Make called in directory where kernel src resides
+# Do we want to locate output files in a separate directory?
+ifdef O
+ ifeq ("$(origin O)", "command line")
+ KBUILD_OUTPUT := $(O)
+ endif
+endif
+
+# That's our default target when none is given on the command line
+PHONY := _all
+_all:
+
+ifneq ($(KBUILD_OUTPUT),)
+# Invoke a second make in the output directory, passing relevant variables
+# check that the output directory actually exists
+saved-output := $(KBUILD_OUTPUT)
+KBUILD_OUTPUT := $(shell cd $(KBUILD_OUTPUT) && /bin/pwd)
+$(if $(KBUILD_OUTPUT),, \
+ $(error output directory "$(saved-output)" does not exist))
+
+PHONY += $(MAKECMDGOALS)
+
+$(filter-out _all,$(MAKECMDGOALS)) _all:
+ $(if $(KBUILD_VERBOSE:1=),@)$(MAKE) -C $(KBUILD_OUTPUT) \
+ KBUILD_SRC=$(CURDIR) \
+ KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile $@
+
+# Leave processing to above invocation of make
+skip-makefile := 1
+endif # ifneq ($(KBUILD_OUTPUT),)
+endif # ifeq ($(KBUILD_SRC),)
+
+# We process the rest of the Makefile if this is the final invocation of make
+ifeq ($(skip-makefile),)
+
+# If building an external module we do not care about the all: rule,
+# but instead _all depends on modules
+PHONY += all
+ifeq ($(KBUILD_EXTMOD),)
+_all: all
+else
+_all: modules
+endif
+
+srctree := $(if $(KBUILD_SRC),$(KBUILD_SRC),$(CURDIR))
+TOPDIR := $(srctree)
+# FIXME - TOPDIR is obsolete, use srctree/objtree
+objtree := $(CURDIR)
+src := $(srctree)
+obj := $(objtree)
+
+VPATH := $(srctree)$(if $(KBUILD_EXTMOD),:$(KBUILD_EXTMOD))
+
+export srctree objtree VPATH TOPDIR
+
+
+# SUBARCH tells the usermode build what the underlying arch is. That is set
+# first, and if a usermode build is happening, the "ARCH=um" on the command
+# line overrides the setting of ARCH below. If a native build is happening,
+# then ARCH is assigned, getting whatever value it gets normally, and
+# SUBARCH is subsequently ignored.
+
+SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
+ -e s/arm.*/arm/ -e s/sa110/arm/ \
+ -e s/s390x/s390/ -e s/parisc64/parisc/ \
+ -e s/ppc.*/powerpc/ -e s/mips.*/mips/ )
+
+# Cross compiling and selecting different set of gcc/bin-utils
+# ---------------------------------------------------------------------------
+#
+# When performing cross compilation for other architectures ARCH shall be set
+# to the target architecture. (See arch/* for the possibilities).
+# ARCH can be set during invocation of make:
+# make ARCH=ia64
+# Another way is to have ARCH set in the environment.
+# The default ARCH is the host where make is executed.
+
+# CROSS_COMPILE specifies the prefix used for all executables invoked
+# during compilation. Only gcc and related bin-utils executables
+# are prefixed with $(CROSS_COMPILE).
+# CROSS_COMPILE can be set on the command line
+# make CROSS_COMPILE=ia64-linux-
+# Alternatively CROSS_COMPILE can be set in the environment.
+# Default value for CROSS_COMPILE is not to prefix executables
+# Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
+
+ARCH ?= $(SUBARCH)
+CROSS_COMPILE ?=
+
+# Architecture as present in compile.h
+UTS_MACHINE := $(ARCH)
+
+# SHELL used by kbuild
+CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
+ else if [ -x /bin/bash ]; then echo /bin/bash; \
+ else echo sh; fi ; fi)
+
+HOSTCC = gcc
+HOSTCXX = g++
+HOSTCFLAGS = -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer
+HOSTCXXFLAGS = -O2
+
+# Decide whether to build built-in, modular, or both.
+# Normally, just do built-in.
+
+KBUILD_MODULES :=
+KBUILD_BUILTIN := 1
+
+# If we have only "make modules", don't compile built-in objects.
+# When we're building modules with modversions, we need to consider
+# the built-in objects during the descent as well, in order to
+# make sure the checksums are up to date before we record them.
+
+ifeq ($(MAKECMDGOALS),modules)
+ KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1)
+endif
+
+# If we have "make <whatever> modules", compile modules
+# in addition to whatever we do anyway.
+# Just "make" or "make all" shall build modules as well
+
+ifneq ($(filter all _all modules,$(MAKECMDGOALS)),)
+ KBUILD_MODULES := 1
+endif
+
+ifeq ($(MAKECMDGOALS),)
+ KBUILD_MODULES := 1
+endif
+
+export KBUILD_MODULES KBUILD_BUILTIN
+export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD
+
+# Beautify output
+# ---------------------------------------------------------------------------
+#
+# Normally, we echo the whole command before executing it. By making
+# that echo $($(quiet)$(cmd)), we now have the possibility to set
+# $(quiet) to choose other forms of output instead, e.g.
+#
+# quiet_cmd_cc_o_c = Compiling $(RELDIR)/$@
+# cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
+#
+# If $(quiet) is empty, the whole command will be printed.
+# If it is set to "quiet_", only the short version will be printed.
+# If it is set to "silent_", nothing will be printed at all, since
+# the variable $(silent_cmd_cc_o_c) doesn't exist.
+#
+# A simple variant is to prefix commands with $(Q) - that's useful
+# for commands that shall be hidden in non-verbose mode.
+#
+# $(Q)ln $@ :<
+#
+# If KBUILD_VERBOSE equals 0 then the above command will be hidden.
+# If KBUILD_VERBOSE equals 1 then the above command is displayed.
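+# For example, with the hypothetical cc_o_c rule above, a default build would
+# echo only the short form:
+#   Compiling kernel/sched.o
+# while "make V=1" echoes the full $(CC) command line and "make -s" prints
+# nothing for that step.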
+
+ifeq ($(KBUILD_VERBOSE),1)
+ quiet =
+ Q =
+else
+ quiet=quiet_
+ Q = @
+endif
+
+# If the user is running make -s (silent mode), suppress echoing of
+# commands
+
+ifneq ($(findstring s,$(MAKEFLAGS)),)
+ quiet=silent_
+endif
+
+export quiet Q KBUILD_VERBOSE
+
+
+# Look for make include files relative to root of kernel src
+MAKEFLAGS += --include-dir=$(srctree)
+
+# We need some generic definitions
+include $(srctree)/scripts/Kbuild.include
+
+# For maximum performance (+ possibly random breakage), uncomment
+# the following:
+
+#MAKEFLAGS += -rR
+
+# Make variables (CC, etc...)
+
+AS = $(CROSS_COMPILE)as
+LD = $(CROSS_COMPILE)ld
+CC = $(CROSS_COMPILE)gcc
+CPP = $(CC) -E
+AR = $(CROSS_COMPILE)ar
+NM = $(CROSS_COMPILE)nm
+STRIP = $(CROSS_COMPILE)strip
+OBJCOPY = $(CROSS_COMPILE)objcopy
+OBJDUMP = $(CROSS_COMPILE)objdump
+AWK = awk
+GENKSYMS = scripts/genksyms/genksyms
+DEPMOD = /sbin/depmod
+KALLSYMS = scripts/kallsyms
+PERL = perl
+CHECK = sparse
+
+CHECKFLAGS := -D__lwk__ -Dlwk -D__STDC__ -Dunix -D__unix__ -Wbitwise $(CF)
+MODFLAGS = -DMODULE
+CFLAGS_MODULE = $(MODFLAGS)
+AFLAGS_MODULE = $(MODFLAGS)
+LDFLAGS_MODULE = -r
+CFLAGS_KERNEL =
+AFLAGS_KERNEL =
+
+
+# Use LWKINCLUDE when you must reference the include/ directory.
+# Needed to be compatible with the O= option
+LWKINCLUDE := -Iinclude \
+ $(if $(KBUILD_SRC),-Iinclude2 -I$(srctree)/include) \
+ -include include/lwk/autoconf.h
+
+CPPFLAGS := -D__KERNEL__ $(LWKINCLUDE)
+
+CFLAGS := -std=gnu99 \
+ -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+ -fno-strict-aliasing -fno-common
+
+ifeq ($(call cc-option-yn, -fstack-protector),y)
+CFLAGS += -fno-stack-protector
+endif
+
+ifeq ($(call cc-option-yn, -fgnu89-inline),y)
+CFLAGS += -fgnu89-inline
+endif
+
+AFLAGS := -D__ASSEMBLY__
+
+# Read KERNELRELEASE from .kernelrelease (if it exists)
+KERNELRELEASE = $(shell cat .kernelrelease 2> /dev/null)
+KERNELVERSION = $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
+
+export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION \
+ ARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC \
+ CPP AR NM STRIP OBJCOPY OBJDUMP MAKE AWK GENKSYMS PERL UTS_MACHINE \
+ HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
+
+export CPPFLAGS NOSTDINC_FLAGS LWKINCLUDE OBJCOPYFLAGS LDFLAGS
+export CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
+export AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
+
+# When compiling out-of-tree modules, put MODVERDIR in the module
+# tree rather than in the kernel tree. The kernel tree might
+# even be read-only.
+export MODVERDIR := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_versions
+
+# Files to ignore in find ... statements
+
+RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS -o -name .pc -o -name .hg -o -name .git \) -prune -o
+export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exclude CVS --exclude .pc --exclude .hg --exclude .git
+
+# ===========================================================================
+# Rules shared between *config targets and build targets
+
+# Basic helpers built in scripts/
+PHONY += scripts_basic
+scripts_basic:
+ $(Q)$(MAKE) $(build)=scripts/basic
+
+# To avoid any implicit rule to kick in, define an empty command.
+scripts/basic/%: scripts_basic ;
+
+PHONY += outputmakefile
+# outputmakefile generates a Makefile in the output directory, if using a
+# separate output directory. This allows convenient use of make in the
+# output directory.
+outputmakefile:
+ifneq ($(KBUILD_SRC),)
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile \
+ $(srctree) $(objtree) $(VERSION) $(PATCHLEVEL)
+endif
+
+# To make sure we do not include .config for any of the *config targets
+# catch them early, and hand them over to scripts/kconfig/Makefile
+# It is allowed to specify more targets when calling make, including
+# mixing *config targets and build targets.
+# For example 'make oldconfig all'.
+# Detect when mixed targets are specified, and make a second invocation
+# of make so .config is not included in this case either (for *config).
+
+no-dot-config-targets := clean mrproper distclean \
+ cscope TAGS tags help %docs check%
+
+config-targets := 0
+mixed-targets := 0
+dot-config := 1
+
+ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),)
+ ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),)
+ dot-config := 0
+ endif
+endif
+
+ifeq ($(KBUILD_EXTMOD),)
+ ifneq ($(filter config %config,$(MAKECMDGOALS)),)
+ config-targets := 1
+ ifneq ($(filter-out config %config,$(MAKECMDGOALS)),)
+ mixed-targets := 1
+ endif
+ endif
+endif
+
+ifeq ($(mixed-targets),1)
+# ===========================================================================
+# We're called with mixed targets (*config and build targets).
+# Handle them one by one.
+
+%:: FORCE
+ $(Q)$(MAKE) -C $(srctree) KBUILD_SRC= $@
+
+else
+ifeq ($(config-targets),1)
+# ===========================================================================
+# *config targets only - make sure prerequisites are updated, and descend
+# in scripts/kconfig to make the *config target
+
+# Read arch specific Makefile to set KBUILD_DEFCONFIG as needed.
+# KBUILD_DEFCONFIG may point to an alternative default configuration
+# used for 'make defconfig'
+include $(srctree)/arch/$(ARCH)/Makefile
+export KBUILD_DEFCONFIG
+
+config %config: scripts_basic outputmakefile FORCE
+ $(Q)mkdir -p include/lwk
+ $(Q)$(MAKE) $(build)=scripts/kconfig $@
+ $(Q)$(MAKE) -C $(srctree) KBUILD_SRC= .kernelrelease
+
+else
+# ===========================================================================
+# Build targets only - this includes vmlwk, arch specific targets, clean
+# targets and others. In general all targets except *config targets.
+
+ifeq ($(KBUILD_EXTMOD),)
+# Additional helpers built in scripts/
+# Carefully list dependencies so we do not try to build scripts twice
+# in parallel
+PHONY += scripts
+scripts: scripts_basic include/config/MARKER
+ $(Q)$(MAKE) $(build)=$(@)
+
+scripts_basic: include/lwk/autoconf.h
+
+# Objects we will link into vmlwk / subdirs we need to visit
+init-y := init/
+drivers-y := drivers/
+#net-y := net/
+libs-y := lib/
+#core-y := usr/
+endif # KBUILD_EXTMOD
+
+ifeq ($(dot-config),1)
+# In this section, we need .config
+
+# Read in dependencies to all Kconfig* files, make sure to run
+# oldconfig if changes are detected.
+-include .kconfig.d
+
+include .config
+
+# If .config needs to be updated, it will be done via the dependency
+# that autoconf has on .config.
+# To avoid any implicit rule to kick in, define an empty command
+.config .kconfig.d: ;
+
+# If .config is newer than include/lwk/autoconf.h, someone tinkered
+# with it and forgot to run make oldconfig.
+# If kconfig.d is missing then we are probably in a cleaned tree, so
+# we execute the config step to be sure to catch updated Kconfig files
+include/lwk/autoconf.h: .kconfig.d .config
+ $(Q)mkdir -p include/lwk
+ $(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig
+else
+# Dummy target needed, because used as prerequisite
+include/lwk/autoconf.h: ;
+endif
+
+DEFAULT_EXTRA_TARGETS=vmlwk.bin vmlwk.asm init_task
+
+# The all: target is the default when no target is given on the
+# command line.
+# This allows a user to issue just 'make' to build a kernel including modules.
+# Defaults to vmlwk, but this is usually overridden in the arch Makefile.
+all: vmlwk $(DEFAULT_EXTRA_TARGETS)
+
+ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+CFLAGS += -Os
+else
+CFLAGS += -O2
+endif
+
+ifdef CONFIG_FRAME_POINTER
+CFLAGS += -fno-omit-frame-pointer $(call cc-option,-fno-optimize-sibling-calls,)
+else
+CFLAGS += -fomit-frame-pointer
+endif
+
+ifdef CONFIG_UNWIND_INFO
+CFLAGS += -fasynchronous-unwind-tables
+endif
+
+ifdef CONFIG_DEBUG_INFO
+CFLAGS += -g
+endif
+
+include $(srctree)/arch/$(ARCH)/Makefile
+
+# arch Makefile may override CC so keep this after arch Makefile is included
+NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
+CHECKFLAGS += $(NOSTDINC_FLAGS)
+
+# disable pointer signedness warnings in gcc 4.0
+CFLAGS += $(call cc-option,-Wno-pointer-sign,)
+
+# Default kernel image to build when no specific target is given.
+# KBUILD_IMAGE may be overridden on the command line or
+# set in the environment
+# Also any assignments in arch/$(ARCH)/Makefile take precedence over
+# this default value
+export KBUILD_IMAGE ?= vmlwk
+
+#
+# INSTALL_PATH specifies where to place the updated kernel and system map
+# images. Default is /boot, but you can set it to other values
+export INSTALL_PATH ?= /boot
+
+#
+# INSTALL_MOD_PATH specifies a prefix to MODLIB for module directory
+# relocations required by build roots. This is not defined in the
+# makefile, but the argument can be passed to make if needed.
+#
+
+MODLIB = $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE)
+export MODLIB
+
+
+ifeq ($(KBUILD_EXTMOD),)
+#core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y += kernel/ mm/
+
+vmlwk-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
+ $(net-y) $(net-m) $(libs-y) $(libs-m)))
+
+vmlwk-alldirs := $(sort $(vmlwk-dirs) $(patsubst %/,%,$(filter %/, \
+ $(init-n) $(init-) \
+ $(core-n) $(core-) $(drivers-n) $(drivers-) \
+ $(net-n) $(net-) $(libs-n) $(libs-))))
+
+init-y := $(patsubst %/, %/built-in.o, $(init-y))
+core-y := $(patsubst %/, %/built-in.o, $(core-y))
+drivers-y := $(patsubst %/, %/built-in.o, $(drivers-y))
+net-y := $(patsubst %/, %/built-in.o, $(net-y))
+libs-y1 := $(patsubst %/, %/lib.a, $(libs-y))
+libs-y2 := $(patsubst %/, %/built-in.o, $(libs-y))
+libs-y := $(libs-y1) $(libs-y2)
+
+# Build vmlwk
+# ---------------------------------------------------------------------------
+# vmlwk is built from the objects selected by $(vmlwk-init) and
+# $(vmlwk-main). Most are built-in.o files from top-level directories
+# in the kernel tree, others are specified in arch/$(ARCH)/Makefile.
+# Ordering when linking is important, and $(vmlwk-init) must be first.
+#
+# vmlwk
+# ^
+# |
+# +-< $(vmlwk-init)
+# | +--< init/version.o + more
+# |
+# +--< $(vmlwk-main)
+# | +--< driver/built-in.o mm/built-in.o + more
+# |
+# +-< kallsyms.o (see description in CONFIG_KALLSYMS section)
+#
+# vmlwk version (uname -v) cannot be updated during normal
+# descending-into-subdirs phase since we do not yet know if we need to
+# update vmlwk.
+# Therefore this step is delayed until just before final link of vmlwk -
+# except in the kallsyms case where it is done just before adding the
+# symbols to the kernel.
+#
+# System.map is generated to document addresses of all kernel symbols
+
+vmlwk-init := $(head-y) $(init-y)
+vmlwk-main := $(core-y) $(libs-y) $(drivers-y) $(net-y)
+vmlwk-all := $(vmlwk-init) $(vmlwk-main)
+vmlwk-lds := arch/$(ARCH)/kernel/vmlwk.lds
+
+# Rule to link vmlwk - also used during CONFIG_KALLSYMS
+# May be overridden by arch/$(ARCH)/Makefile
+quiet_cmd_vmlwk__ ?= LD $@
+ cmd_vmlwk__ ?= $(LD) $(LDFLAGS) $(LDFLAGS_vmlwk) -o $@ \
+ -T $(vmlwk-lds) $(vmlwk-init) \
+ --start-group $(vmlwk-main) --end-group \
+ $(filter-out $(vmlwk-lds) $(vmlwk-init) $(vmlwk-main) FORCE ,$^)
+
+# Generate new vmlwk version
+quiet_cmd_vmlwk_version = GEN .version
+ cmd_vmlwk_version = set -e; \
+ if [ ! -r .version ]; then \
+ rm -f .version; \
+ echo 1 >.version; \
+ else \
+ mv .version .old_version; \
+ expr 0$$(cat .old_version) + 1 >.version; \
+ fi; \
+ $(MAKE) $(build)=init
+
+# Generate System.map
+quiet_cmd_sysmap = SYSMAP
+ cmd_sysmap = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap
+
+# Link of vmlwk
+# If CONFIG_KALLSYMS is set .version is already updated
+# Generate System.map and verify that the content is consistent
+# Use + in front of the vmlwk_version rule to silence the warning with make -j2
+# First command is ':' to allow us to use + in front of the rule
+define rule_vmlwk__
+ :
+ $(if $(CONFIG_KALLSYMS),,+$(call cmd,vmlwk_version))
+
+ $(call cmd,vmlwk__)
+ $(Q)echo 'cmd_$@ := $(cmd_vmlwk__)' > $(@D)/.$(@F).cmd
+
+ $(Q)$(if $($(quiet)cmd_sysmap), \
+ echo ' $($(quiet)cmd_sysmap) System.map' &&) \
+ $(cmd_sysmap) $@ System.map; \
+ if [ $$? -ne 0 ]; then \
+ rm -f $@; \
+ /bin/false; \
+ fi;
+ $(verify_kallsyms)
+endef
+
+
+ifdef CONFIG_KALLSYMS
+# Generate section listing all symbols and add it into vmlwk $(kallsyms.o)
+# It's a three stage process:
+# o .tmp_vmlwk1 has all symbols and sections, but __kallsyms is
+# empty
+# Running kallsyms on that gives us .tmp_kallsyms1.o with
+# the right size - vmlwk version (uname -v) is updated during this step
+# o .tmp_vmlwk2 now has a __kallsyms section of the right size,
+# but due to the added section, some addresses have shifted.
+# From here, we generate a correct .tmp_kallsyms2.o
+# o The correct .tmp_kallsyms2.o is linked into the final vmlwk.
+# o Verify that the System.map from vmlwk matches the map from
+# .tmp_vmlwk2, just in case we did not generate kallsyms correctly.
+# o If CONFIG_KALLSYMS_EXTRA_PASS is set, do an extra pass using
+# .tmp_vmlwk3 and .tmp_kallsyms3.o. This is only meant as a
+# temporary bypass to allow the kernel to be built while the
+# maintainers work out what went wrong with kallsyms.
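+#
+# In the default two-pass case the intermediate files are therefore
+# (sketch): .tmp_vmlwk1 -> .tmp_kallsyms1.o -> .tmp_vmlwk2 ->
+# .tmp_kallsyms2.o -> vmlwk, with System.map checked against
+# .tmp_System.map at the end.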
+
+ifdef CONFIG_KALLSYMS_EXTRA_PASS
+last_kallsyms := 3
+else
+last_kallsyms := 2
+endif
+
+kallsyms.o := .tmp_kallsyms$(last_kallsyms).o
+
+define verify_kallsyms
+ $(Q)$(if $($(quiet)cmd_sysmap), \
+ echo ' $($(quiet)cmd_sysmap) .tmp_System.map' &&) \
+ $(cmd_sysmap) .tmp_vmlwk$(last_kallsyms) .tmp_System.map
+ $(Q)cmp -s System.map .tmp_System.map || \
+ (echo Inconsistent kallsyms data; \
+ echo Try setting CONFIG_KALLSYMS_EXTRA_PASS; \
+ rm .tmp_kallsyms* ; /bin/false )
+endef
+
+# Update vmlwk version before link
+# Use + in front of this rule to silence the warning about make -j1
+# First command is ':' to allow us to use + in front of this rule
+cmd_ksym_ld = $(cmd_vmlwk__)
+define rule_ksym_ld
+ :
+ +$(call cmd,vmlwk_version)
+ $(call cmd,vmlwk__)
+ $(Q)echo 'cmd_$@ := $(cmd_vmlwk__)' > $(@D)/.$(@F).cmd
+endef
+
+# Generate .S file with all kernel symbols
+quiet_cmd_kallsyms = KSYM $@
+ cmd_kallsyms = $(NM) -n $< | $(KALLSYMS) \
+ $(if $(CONFIG_KALLSYMS_ALL),--all-symbols) > $@
+
+.tmp_kallsyms1.o .tmp_kallsyms2.o .tmp_kallsyms3.o: %.o: %.S scripts FORCE
+ $(call if_changed_dep,as_o_S)
+
+.tmp_kallsyms%.S: .tmp_vmlwk% $(KALLSYMS)
+ $(call cmd,kallsyms)
+
+# .tmp_vmlwk1 must be complete except kallsyms, so update vmlwk version
+.tmp_vmlwk1: $(vmlwk-lds) $(vmlwk-all) FORCE
+ $(call if_changed_rule,ksym_ld)
+
+.tmp_vmlwk2: $(vmlwk-lds) $(vmlwk-all) .tmp_kallsyms1.o FORCE
+ $(call if_changed,vmlwk__)
+
+.tmp_vmlwk3: $(vmlwk-lds) $(vmlwk-all) .tmp_kallsyms2.o FORCE
+ $(call if_changed,vmlwk__)
+
+# Needs to visit scripts/ before $(KALLSYMS) can be used.
+$(KALLSYMS): scripts ;
+
+# Generate some data for debugging strange kallsyms problems
+debug_kallsyms: .tmp_map$(last_kallsyms)
+
+.tmp_map%: .tmp_vmlwk% FORCE
+ ($(OBJDUMP) -h $< | $(AWK) '/^ +[0-9]/{print $$4 " 0 " $$2}'; $(NM) $<) | sort > $@
+
+.tmp_map3: .tmp_map2
+
+.tmp_map2: .tmp_map1
+
+endif # ifdef CONFIG_KALLSYMS
+
+# vmlwk image - including updated kernel symbols
+vmlwk: $(vmlwk-lds) $(vmlwk-init) $(vmlwk-main) $(kallsyms.o) FORCE
+ $(call if_changed_rule,vmlwk__)
+ $(Q)rm -f .old_version
+
+vmlwk.bin: vmlwk FORCE
+ $(OBJCOPY) -O binary $< $@
+
+vmlwk.asm: vmlwk
+ $(OBJDUMP) --disassemble $< > $@
+
+# The actual objects are generated when descending,
+# make sure no implicit rule kicks in
+$(sort $(vmlwk-init) $(vmlwk-main)) $(vmlwk-lds): $(vmlwk-dirs) ;
+
+# Handle descending into subdirectories listed in $(vmlwk-dirs)
+# Preset locale variables to speed up the build process. Limit locale
+# tweaks to this spot to avoid wrong language settings when running
+# make menuconfig etc.
+# Error messages still appear in the original language
+
+PHONY += $(vmlwk-dirs)
+$(vmlwk-dirs): prepare scripts
+ $(Q)$(MAKE) $(build)=$@
+
+# Build the kernel release string
+# The KERNELRELEASE is stored in a file named .kernelrelease
+# to be used when executing for example make install or make modules_install
+#
+# Take the contents of any files called localversion* and the config
+# variable CONFIG_LOCALVERSION and append them to KERNELRELEASE.
+# LOCALVERSION from the command line overrides all of this
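+# As a hypothetical example: with a localversion file containing "-test" and
+# CONFIG_LOCALVERSION="-smp", the release string becomes 1.0.0Kitten-test-smp
+# and is written to .kernelrelease.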
+
+nullstring :=
+space := $(nullstring) # end of line
+
+___localver = $(objtree)/localversion* $(srctree)/localversion*
+__localver = $(sort $(wildcard $(___localver)))
+# skip backup files (containing '~')
+_localver = $(foreach f, $(__localver), $(if $(findstring ~, $(f)),,$(f)))
+
+localver = $(subst $(space),, \
+ $(shell cat /dev/null $(_localver)) \
+ $(patsubst "%",%,$(CONFIG_LOCALVERSION)))
+
+# If CONFIG_LOCALVERSION_AUTO is set scripts/setlocalversion is called
+# and if the SCM is known a tag from the SCM is appended.
+# The appended tag is determined by the SCM used.
+#
+# Currently, only git is supported.
+# Other SCMs can edit scripts/setlocalversion and add the appropriate
+# checks as needed.
+ifdef CONFIG_LOCALVERSION_AUTO
+ _localver-auto = $(shell $(CONFIG_SHELL) \
+ $(srctree)/scripts/setlocalversion $(srctree))
+ localver-auto = $(LOCALVERSION)$(_localver-auto)
+endif
+
+localver-full = $(localver)$(localver-auto)
+
+# Store (new) KERNELRELEASE string in .kernelrelease
+kernelrelease = $(KERNELVERSION)$(localver-full)
+.kernelrelease: FORCE
+ $(Q)rm -f $@
+ $(Q)echo $(kernelrelease) > $@
+
+
+# Things we need to do before we recursively start building the kernel
+# or the modules are listed in "prepare".
+# A multi level approach is used. prepareN is processed before prepareN-1.
+# archprepare is used in arch Makefiles; when it is processed, the arch
+# symlink, version.h and scripts_basic are processed / created.
+
+# Listed in dependency order
+PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
+
+# prepare-all is deprecated; use prepare instead
+PHONY += prepare-all
+
+# prepare3 is used to check if we are building in a separate output directory,
+# and if so do:
+# 1) Check that make has not been executed in the kernel src $(srctree)
+# 2) Create the include2 directory, used for the second arch symlink
+prepare3: .kernelrelease
+ifneq ($(KBUILD_SRC),)
+ @echo ' Using $(srctree) as source for kernel'
+ $(Q)if [ -f $(srctree)/.config ]; then \
+ echo " $(srctree) is not clean, please run 'make mrproper'";\
+ echo " in the '$(srctree)' directory.";\
+ /bin/false; \
+ fi;
+ $(Q)if [ ! -d include2 ]; then mkdir -p include2; fi;
+ $(Q)ln -fsn $(srctree)/include/arch-$(ARCH) include2/arch
+endif
+
+# prepare2 creates a makefile if using a separate output directory
+prepare2: prepare3 outputmakefile
+
+prepare1: prepare2 include/lwk/version.h include/arch \
+ include/config/MARKER
+ifneq ($(KBUILD_MODULES),)
+ $(Q)mkdir -p $(MODVERDIR)
+ $(Q)rm -f $(MODVERDIR)/*
+endif
+
+archprepare: prepare1 scripts_basic
+
+prepare0: archprepare FORCE
+ $(Q)$(MAKE) $(build)=.
+
+# All the preparing..
+prepare prepare-all: prepare0
+
+# Leave this as default for preprocessing vmlwk.lds.S, which is now
+# done in arch/$(ARCH)/kernel/Makefile
+
+export CPPFLAGS_vmlwk.lds += -P -C -U$(ARCH)
+
+# FIXME: The arch symlink changes when $(ARCH) changes. That's
+# hard to detect, but I suppose "make mrproper" is a good idea
+# before switching between archs anyway.
+
+include/arch:
+ @echo ' SYMLINK $@ -> include/arch-$(ARCH)'
+ $(Q)if [ ! -d include ]; then mkdir -p include; fi;
+ @ln -fsn arch-$(ARCH) $@
+
+# Split autoconf.h into include/lwk/config/*
+
+include/config/MARKER: scripts/basic/split-include include/lwk/autoconf.h
+ @echo ' SPLIT include/lwk/autoconf.h -> include/config/*'
+ @scripts/basic/split-include include/lwk/autoconf.h include/config
+ @touch $@
+
+# Generate some files
+# ---------------------------------------------------------------------------
+
+# KERNELRELEASE can change from a few different places, meaning version.h
+# needs to be updated, so this check is forced on all builds
+
+uts_len := 64
+
+define filechk_version.h
+ if [ `echo -n "$(KERNELRELEASE)" | wc -c ` -gt $(uts_len) ]; then \
+ echo '"$(KERNELRELEASE)" exceeds $(uts_len) characters' >&2; \
+ exit 1; \
+ fi; \
+ (echo \#define UTS_RELEASE \"$(KERNELRELEASE)\"; \
+ echo \#define LWK_VERSION_CODE `expr $(VERSION) \\* 65536 + $(PATCHLEVEL) \\* 256 + $(SUBLEVEL)`; \
+ echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))'; \
+ )
+endef
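+# As an illustration using the version numbers at the top of this Makefile
+# (and ignoring any localversion suffix), the generated header would contain:
+#   #define UTS_RELEASE "1.0.0Kitten"
+#   #define LWK_VERSION_CODE 65536       /* 1*65536 + 0*256 + 0 */
+#   #define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))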
+
+include/lwk/version.h: $(srctree)/Makefile .config .kernelrelease FORCE
+ $(call filechk,version.h)
+
+# ---------------------------------------------------------------------------
+
+PHONY += depend dep
+depend dep:
+ @echo '*** Warning: make $@ is unnecessary now.'
+
+# ---------------------------------------------------------------------------
+# Kernel headers
+INSTALL_HDR_PATH=$(MODLIB)/abi
+export INSTALL_HDR_PATH
+
+PHONY += headers_install
+headers_install: include/lwk/version.h
+ $(Q)unifdef -Ux /dev/null
+ $(Q)rm -rf $(INSTALL_HDR_PATH)/include
+ $(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.headersinst obj=include
+
+PHONY += headers_check
+headers_check: headers_install
+ $(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.headersinst obj=include HDRCHECK=1
+
+# ---------------------------------------------------------------------------
+# Modules
+
+ifdef CONFIG_MODULES
+
+# By default, build modules as well
+
+all: modules
+
+# Build modules
+
+PHONY += modules
+modules: $(vmlwk-dirs) $(if $(KBUILD_BUILTIN),vmlwk)
+ @echo ' Building modules, stage 2.';
+ $(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.modpost
+
+
+# Target to prepare building external modules
+PHONY += modules_prepare
+modules_prepare: prepare scripts
+
+# Target to install modules
+PHONY += modules_install
+modules_install: _modinst_ _modinst_post
+
+PHONY += _modinst_
+_modinst_:
+ @if [ -z "`$(DEPMOD) -V 2>/dev/null | grep module-init-tools`" ]; then \
+ echo "Warning: you may need to install module-init-tools"; \
+ echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt";\
+ sleep 1; \
+ fi
+ @rm -rf $(MODLIB)/kernel
+ @rm -f $(MODLIB)/source
+ @mkdir -p $(MODLIB)/kernel
+ @ln -s $(srctree) $(MODLIB)/source
+ @if [ ! $(objtree) -ef $(MODLIB)/build ]; then \
+ rm -f $(MODLIB)/build ; \
+ ln -s $(objtree) $(MODLIB)/build ; \
+ fi
+ $(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.modinst
+
+# If System.map exists, run depmod. This deliberately does not have a
+# dependency on System.map since that would run the dependency tree on
+# vmlwk. This depmod is only for convenience to give the initial
+# boot a modules.dep even before / is mounted read-write. However the
+# boot script depmod is the master version.
+ifeq "$(strip $(INSTALL_MOD_PATH))" ""
+depmod_opts :=
+else
+depmod_opts := -b $(INSTALL_MOD_PATH) -r
+endif
+PHONY += _modinst_post
+_modinst_post: _modinst_
+ if [ -r System.map -a -x $(DEPMOD) ]; then $(DEPMOD) -ae -F System.map $(depmod_opts) $(KERNELRELEASE); fi
+
+else # CONFIG_MODULES
+
+# Modules not configured
+# ---------------------------------------------------------------------------
+
+modules modules_install: FORCE
+ @echo
+ @echo "The present kernel configuration has modules disabled."
+ @echo "Type 'make config' and enable loadable module support."
+ @echo "Then build a kernel with module support enabled."
+ @echo
+ @exit 1
+
+endif # CONFIG_MODULES
+
+###
+# Cleaning is done on three levels.
+# make clean Delete most generated files
+# Leave enough to build external modules
+# make mrproper Delete the current configuration, and all generated files
+# make distclean Remove editor backup files, patch leftover files and the like
+
+# Directories & files removed with 'make clean'
+CLEAN_DIRS += $(MODVERDIR)
+CLEAN_FILES += vmlwk System.map vmlwk.bin vmlwk.asm \
+ .tmp_kallsyms* .tmp_version .tmp_vmlwk* .tmp_System.map
+
+# Directories & files removed with 'make mrproper'
+MRPROPER_DIRS += include/config include2
+MRPROPER_FILES += .config .config.old include/arch .version .old_version \
+ include/lwk/autoconf.h include/lwk/version.h \
+ .kernelrelease Module.symvers tags TAGS cscope*
+
+# clean - Delete most, but leave enough to build external modules
+#
+clean: rm-dirs := $(CLEAN_DIRS)
+clean: rm-files := $(CLEAN_FILES)
+clean-dirs := $(addprefix _clean_,$(srctree) $(vmlwk-alldirs))
+
+PHONY += $(clean-dirs) clean archclean
+$(clean-dirs):
+ $(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@)
+
+clean: archclean $(clean-dirs)
+ $(call cmd,rmdirs)
+ $(call cmd,rmfiles)
+ @find . $(RCS_FIND_IGNORE) \
+ \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
+ -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \) \
+ -type f -print | xargs rm -f
+ @rm -f init_task
+
+# mrproper - Delete all generated files, including .config
+#
+mrproper: rm-dirs := $(wildcard $(MRPROPER_DIRS))
+mrproper: rm-files := $(wildcard $(MRPROPER_FILES))
+#mrproper-dirs := $(addprefix _mrproper_,Documentation/DocBook scripts)
+mrproper-dirs := $(addprefix _mrproper_, scripts)
+
+PHONY += $(mrproper-dirs) mrproper archmrproper
+$(mrproper-dirs):
+ $(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@)
+
+mrproper: clean archmrproper $(mrproper-dirs)
+ $(call cmd,rmdirs)
+ $(call cmd,rmfiles)
+
+# distclean
+#
+PHONY += distclean
+
+distclean: mrproper
+ @find $(srctree) $(RCS_FIND_IGNORE) \
+ \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
+ -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
+ -o -name '.*.rej' -o -size 0 \
+ -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
+ -type f -print | xargs rm -f
+
+
+# Packaging of the kernel to various formats
+# ---------------------------------------------------------------------------
+# rpm target kept for backward compatibility
+package-dir := $(srctree)/scripts/package
+
+%pkg: FORCE
+ $(Q)$(MAKE) $(build)=$(package-dir) $@
+rpm: FORCE
+ $(Q)$(MAKE) $(build)=$(package-dir) $@
+
+
+# Brief documentation of the typical targets used
+# ---------------------------------------------------------------------------
+
+boards := $(wildcard $(srctree)/arch/$(ARCH)/configs/*_defconfig)
+boards := $(notdir $(boards))
+
+help:
+ @echo 'Cleaning targets:'
+ @echo ' clean - remove most generated files but keep the config'
+ @echo ' mrproper - remove all generated files + config + various backup files'
+ @echo ''
+ @echo 'Configuration targets:'
+ @$(MAKE) -f $(srctree)/scripts/kconfig/Makefile help
+ @echo ''
+ @echo 'Other generic targets:'
+ @echo ' all - Build all targets marked with [*]'
+	@echo '* vmlwk		  - Build the bare kernel'
+ @echo '* modules - Build all modules'
+ @echo ' modules_install - Install all modules to INSTALL_MOD_PATH (default: /)'
+ @echo ' dir/ - Build all files in dir and below'
+ @echo ' dir/file.[ois] - Build specified target only'
+ @echo ' dir/file.ko - Build module including final link'
+ @echo ' rpm - Build a kernel as an RPM package'
+ @echo ' tags/TAGS - Generate tags file for editors'
+ @echo ' cscope - Generate cscope index'
+ @echo ' kernelrelease - Output the release version string'
+ @echo ' kernelversion - Output the version stored in Makefile'
+ @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'
+ @echo ' (default: /lib/modules/$$VERSION/abi)'
+ @echo ''
+ @echo 'Static analysers'
+ @echo ' checkstack - Generate a list of stack hogs'
+ @echo ' namespacecheck - Name space analysis on compiled kernel'
+ @echo ''
+ @echo 'Kernel packaging:'
+ @$(MAKE) $(build)=$(package-dir) help
+ @echo ''
+ @echo 'Documentation targets:'
+ @$(MAKE) -f $(srctree)/Documentation/DocBook/Makefile dochelp
+ @echo ''
+ @echo 'Architecture specific targets ($(ARCH)):'
+ @$(if $(archhelp),$(archhelp),\
+ echo ' No architecture specific help defined for $(ARCH)')
+ @echo ''
+ @$(if $(boards), \
+ $(foreach b, $(boards), \
+ printf " %-24s - Build for %s\\n" $(b) $(subst _defconfig,,$(b));) \
+ echo '')
+
+ @echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
+ @echo ' make O=dir [targets] Locate all output files in "dir", including .config'
+ @echo ' make C=1 [targets] Check all c source with $$CHECK (sparse)'
+ @echo ' make C=2 [targets] Force check of all c source with $$CHECK (sparse)'
+ @echo ''
+ @echo 'Execute "make" or "make all" to build all targets marked with [*] '
+ @echo 'For further info see the ./README file'
+
+
+# Documentation targets
+# ---------------------------------------------------------------------------
+%docs: scripts_basic FORCE
+ $(Q)$(MAKE) $(build)=Documentation/DocBook $@
+
+else # KBUILD_EXTMOD
+
+###
+# External module support.
+# When building external modules, the kernel used as the basis is considered
+# read-only: no consistency checks are made and the make system is not
+# used on the basis kernel. If updates are required in the basis kernel,
+# ordinary make commands (without M=...) must be used.
+#
+# The following are the only valid targets when building external
+# modules.
+# make M=dir clean Delete all automatically generated files
+# make M=dir modules Make all modules in specified dir
+# make M=dir Same as 'make M=dir modules'
+# make M=dir modules_install
+# Install the modules built in the module directory
+# Assumes install directory is already created
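+#
+# For example (the kernel source path below is illustrative), from within an
+# external module directory one would typically run:
+#   make -C /path/to/kitten M=$PWD modules
+#   make -C /path/to/kitten M=$PWD clean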
+
+# We are always building modules
+KBUILD_MODULES := 1
+PHONY += crmodverdir
+crmodverdir:
+ $(Q)mkdir -p $(MODVERDIR)
+ $(Q)rm -f $(MODVERDIR)/*
+
+PHONY += $(objtree)/Module.symvers
+$(objtree)/Module.symvers:
+ @test -e $(objtree)/Module.symvers || ( \
+ echo; \
+ echo " WARNING: Symbol version dump $(objtree)/Module.symvers"; \
+ echo " is missing; modules will have no dependencies and modversions."; \
+ echo )
+
+module-dirs := $(addprefix _module_,$(KBUILD_EXTMOD))
+PHONY += $(module-dirs) modules
+$(module-dirs): crmodverdir $(objtree)/Module.symvers
+ $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
+
+modules: $(module-dirs)
+ @echo ' Building modules, stage 2.';
+ $(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.modpost
+
+PHONY += modules_install
+modules_install: _emodinst_ _emodinst_post
+
+install-dir := $(if $(INSTALL_MOD_DIR),$(INSTALL_MOD_DIR),extra)
+PHONY += _emodinst_
+_emodinst_:
+ $(Q)mkdir -p $(MODLIB)/$(install-dir)
+ $(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.modinst
+
+# Run depmod only if we have System.map and depmod is executable
+quiet_cmd_depmod = DEPMOD $(KERNELRELEASE)
+ cmd_depmod = if [ -r System.map -a -x $(DEPMOD) ]; then \
+ $(DEPMOD) -ae -F System.map \
+ $(if $(strip $(INSTALL_MOD_PATH)), \
+ -b $(INSTALL_MOD_PATH) -r) \
+ $(KERNELRELEASE); \
+ fi
+
+PHONY += _emodinst_post
+_emodinst_post: _emodinst_
+ $(call cmd,depmod)
+
+clean-dirs := $(addprefix _clean_,$(KBUILD_EXTMOD))
+
+PHONY += $(clean-dirs) clean
+$(clean-dirs):
+ $(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@)
+
+clean: rm-dirs := $(MODVERDIR)
+clean: $(clean-dirs)
+ $(call cmd,rmdirs)
+ @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
+ \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
+ -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \) \
+ -type f -print | xargs rm -f
+
+help:
+ @echo ' Building external modules.'
+ @echo ' Syntax: make -C path/to/kernel/src M=$$PWD target'
+ @echo ''
+ @echo ' modules - default target, build the module(s)'
+ @echo ' modules_install - install the module'
+ @echo ' clean - remove generated files in module directory only'
+ @echo ''
+
+# Dummies...
+PHONY += prepare scripts
+prepare: ;
+scripts: ;
+endif # KBUILD_EXTMOD
+
+# Generate tags for editors
+# ---------------------------------------------------------------------------
+
+#We want __srctree to vanish entirely when KBUILD_OUTPUT is not set
+#(which is the most common case IMHO) to avoid unneeded clutter in the big tags file.
+#Adding $(srctree) adds about 20M on i386 to the size of the output file!
+
+ifeq ($(src),$(obj))
+__srctree =
+else
+__srctree = $(srctree)/
+endif
+
+ifeq ($(ALLSOURCE_ARCHS),)
+ifeq ($(ARCH),um)
+ALLINCLUDE_ARCHS := $(ARCH) $(SUBARCH)
+else
+ALLINCLUDE_ARCHS := $(ARCH)
+endif
+else
+#Allow the user to specify only ALLSOURCE_ARCHS on the command line, keeping existing behaviour.
+ALLINCLUDE_ARCHS := $(ALLSOURCE_ARCHS)
+endif
+
+ALLSOURCE_ARCHS := $(ARCH)
+
+define all-sources
+ ( find $(__srctree) $(RCS_FIND_IGNORE) \
+ \( -name include -o -name arch \) -prune -o \
+ -name '*.[chS]' -print; \
+ for ARCH in $(ALLSOURCE_ARCHS) ; do \
+ find $(__srctree)arch/$${ARCH} $(RCS_FIND_IGNORE) \
+ -name '*.[chS]' -print; \
+ done ; \
+ find $(__srctree)include $(RCS_FIND_IGNORE) \
+ \( -name config -o -name 'arch-*' \) -prune \
+ -o -name '*.[chS]' -print; \
+ for ARCH in $(ALLINCLUDE_ARCHS) ; do \
+ find $(__srctree)include/arch-$${ARCH} $(RCS_FIND_IGNORE) \
+ -name '*.[chS]' -print; \
+ done ; \
+ find $(__srctree)include/arch-generic $(RCS_FIND_IGNORE) \
+ -name '*.[chS]' -print )
+endef
+
+quiet_cmd_cscope-file = FILELST cscope.files
+ cmd_cscope-file = (echo \-k; echo \-q; $(all-sources)) > cscope.files
+
+quiet_cmd_cscope = MAKE cscope.out
+ cmd_cscope = cscope -b
+
+cscope: FORCE
+ $(call cmd,cscope-file)
+ $(call cmd,cscope)
+
+quiet_cmd_TAGS = MAKE $@
+define cmd_TAGS
+ rm -f $@; \
+ ETAGSF=`etags --version | grep -i exuberant >/dev/null && \
+ echo "-I __initdata,__exitdata,__acquires,__releases \
+ -I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL \
+ --extra=+f --c-kinds=+px"`; \
+ $(all-sources) | xargs etags $$ETAGSF -a
+endef
+
+TAGS: FORCE
+ $(call cmd,TAGS)
+
+
+quiet_cmd_tags = MAKE $@
+define cmd_tags
+ rm -f $@; \
+ CTAGSF=`ctags --version | grep -i exuberant >/dev/null && \
+ echo "-I __initdata,__exitdata,__acquires,__releases \
+ -I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL \
+ --extra=+f --c-kinds=+px"`; \
+ $(all-sources) | xargs ctags $$CTAGSF -a
+endef
+
+tags: FORCE
+ $(call cmd,tags)
+
+
+# Scripts to check various things for consistency
+# ---------------------------------------------------------------------------
+
+includecheck:
+ find * $(RCS_FIND_IGNORE) \
+ -name '*.[hcS]' -type f -print | sort \
+ | xargs $(PERL) -w scripts/checkincludes.pl
+
+versioncheck:
+ find * $(RCS_FIND_IGNORE) \
+ -name '*.[hcS]' -type f -print | sort \
+ | xargs $(PERL) -w scripts/checkversion.pl
+
+namespacecheck:
+ $(PERL) $(srctree)/scripts/namespace.pl
+
+endif #ifeq ($(config-targets),1)
+endif #ifeq ($(mixed-targets),1)
+
+PHONY += checkstack
+checkstack:
+ $(OBJDUMP) -d vmlwk $$(find . -name '*.ko') | \
+ $(PERL) $(src)/scripts/checkstack.pl $(ARCH)
+
+kernelrelease:
+ $(if $(wildcard .kernelrelease), $(Q)echo $(KERNELRELEASE), \
+ $(error kernelrelease not valid - run 'make *config' to update it))
+kernelversion:
+ @echo $(KERNELVERSION)
+
+# Single targets
+# ---------------------------------------------------------------------------
+# Single targets are compatible with:
+# - build with mixed source and output
+# - build with separate output dir 'make O=...'
+# - external modules
+#
+# target-dir => where to store the output file
+# build-dir => directory in kernel source tree to use
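+#
+# Illustrative example (in-tree build): for 'make kernel/foo.o' (a hypothetical
+# target), build-dir expands to 'kernel' and target-dir to 'kernel/'.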
+
+ifeq ($(KBUILD_EXTMOD),)
+ build-dir = $(patsubst %/,%,$(dir $@))
+ target-dir = $(dir $@)
+else
+ zap-slash=$(filter-out .,$(patsubst %/,%,$(dir $@)))
+ build-dir = $(KBUILD_EXTMOD)$(if $(zap-slash),/$(zap-slash))
+ target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
+endif
+
+%.s: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+%.i: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+%.o: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+%.lst: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+%.s: %.S prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+%.o: %.S prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+
+# Modules
+/ %/: prepare scripts FORCE
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir)
+%.ko: prepare scripts FORCE
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir) $(@:.ko=.o)
+ $(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.modpost
+
+# FIXME Should go into a make.lib or something
+# ===========================================================================
+
+quiet_cmd_rmdirs = $(if $(wildcard $(rm-dirs)),CLEAN $(wildcard $(rm-dirs)))
+ cmd_rmdirs = rm -rf $(rm-dirs)
+
+quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN $(wildcard $(rm-files)))
+ cmd_rmfiles = rm -f $(rm-files)
+
+
+a_flags = -Wp,-MD,$(depfile) $(AFLAGS) $(AFLAGS_KERNEL) \
+ $(NOSTDINC_FLAGS) $(CPPFLAGS) \
+ $(modkern_aflags) $(EXTRA_AFLAGS) $(AFLAGS_$(*F).o)
+
+quiet_cmd_as_o_S = AS $@
+cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
+
+# read all saved command lines
+
+targets := $(wildcard $(sort $(targets)))
+cmd_files := $(wildcard .*.cmd $(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd))
+
+ifneq ($(cmd_files),)
+ $(cmd_files): ; # Do not try to update included dependency files
+ include $(cmd_files)
+endif
+
+# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.clean obj=dir
+# Usage:
+# $(Q)$(MAKE) $(clean)=dir
+clean := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.clean obj
+
+endif # skip-makefile
+
+# Build LWK user-space libraries and example programs
+user: FORCE
+ cd user/; $(MAKE) all
+
+# A simple user-space app for the LWK to launch at boot
+init_task: user FORCE
+ cp user/hello_world/hello_world ./init_task
+
+PHONY += FORCE
+FORCE:
+
+
+# Declare the contents of the .PHONY variable as phony. We keep that
+# information in a variable so we can use it in if_changed and friends.
+.PHONY: $(PHONY)
--- /dev/null
+make config
+make
+make isoimage
+qemu-system-x86_64 -cdrom arch/x86_64/boot/image.iso -serial stdio
+
+(The -smp 4 option can be added as well, but the BIOS used by my version of
+ QEMU doesn't appear to provide an MP table, which Kitten depends on. KVM
+ seems to work fine with the -smp 4 arg... all cpus are detected and
+ initialized.)
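+
+For example, on a KVM-capable host something like the following may work for
+SMP testing (the -enable-kvm flag is illustrative and depends on the QEMU
+version; older setups used a separate kvm/qemu-kvm binary instead):
+
+ qemu-system-x86_64 -enable-kvm -smp 4 -cdrom arch/x86_64/boot/image.iso -serial stdio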
+
+----------------------------------------------------------------------------
+
+Scripts for building cross-compiler toolchains are in scripts/toolchain.
+E.g., to build an x86-64 toolchain:
+
+ cd scripts/toolchain
+ ./build-x86_64.sh
+
+By default, the toolchain will be installed in /opt/toolchain/x86_64.
+Edit PREFIX in build-x86_64.sh to change this.
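+
+For example, to install the toolchain under your home directory instead, you
+might change that line to something like (the exact value is illustrative):
+
+ PREFIX=$HOME/toolchain/x86_64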
+
+To build an x86_64 Kitten using a cross-compiler:
+
+ export PATH=/opt/toolchain/x86_64/bin:$PATH
+ make ARCH=x86_64 CROSS_COMPILE=x86_64-linux-gnu- config
+ make ARCH=x86_64 CROSS_COMPILE=x86_64-linux-gnu-
+
+The resulting kernel image is at:
+
+ arch/x86_64/boot/bzImage
+
+This is a drop-in replacement for a Linux bzImage.
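+
+For example, a hypothetical GRUB (legacy) menu.lst entry might look like the
+following (the paths, console arguments, and use of init_task as the initrd
+are all illustrative):
+
+ title Kitten LWK
+ kernel /boot/kitten-bzImage console=serial
+ initrd /boot/init_task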
+
+If you have syslinux installed ('syslinux' Debian package), a bootable
+ISO CD-ROM image can be built with:
+
+ make ARCH=x86_64 CROSS_COMPILE=x86_64-linux-gnu- isoimage
+
+and booted under the QEMU full-system emulator ('qemu' Debian package):
+
+ qemu-system-x86_64 -cdrom arch/x86_64/boot/image.iso
+
--- /dev/null
+mainmenu "Kitten Kernel Configuration"
+
+config x86_64
+ bool
+ default y
+ help
+ Support for the x86-64 architecture.
+
+source "init/Kconfig"
+
+menu "Target Configuration"
+
+choice
+ prompt "System Architecture"
+ default PC
+
+config PC
+ bool "PC-compatible"
+ help
+ Support for standard PC compatible systems.
+
+config CRAY_XT
+ bool "Red Storm (Cray XT3/XT4)"
+ help
+ Support for Cray XT3 and XT4 systems.
+
+endchoice
+
+
+choice
+ prompt "Processor Family"
+ default MK8
+
+config MK8
+ bool "AMD-Opteron/Athlon64"
+ help
+ Optimize for AMD Opteron/Athlon64/Hammer/K8 CPUs.
+
+config MPSC
+ bool "Intel-64/Core2"
+ help
+ Optimize for Intel 64 architecture.
+
+config GENERIC_CPU
+ bool "Generic-x86-64"
+ help
+ Generic x86-64 CPU.
+
+endchoice
+
+
+#
+# Define implied options from the CPU selection
+#
+
+config X86_L1_CACHE_BYTES
+ int
+ default "128" if GENERIC_CPU || MPSC
+ default "64" if MK8
+
+config X86_L1_CACHE_SHIFT
+ int
+ default "7" if GENERIC_CPU || MPSC
+ default "6" if MK8
+
+config X86_INTERNODE_CACHE_BYTES
+ int
+ default X86_L1_CACHE_BYTES
+
+config X86_INTERNODE_CACHE_SHIFT
+ int
+ default X86_L1_CACHE_SHIFT
+
+config NR_CPUS
+ int "Maximum number of CPUs (2-256)"
+ range 1 255
+ default "8"
+ help
+ This allows you to specify the maximum number of CPUs which this
+ kernel will support. The current maximum is 256 CPUs due to APIC
+ addressing limits; the practical limit may be lower depending on
+ the hardware.
+
+ This is purely to save memory - each supported CPU requires
+ memory in the static kernel configuration.
+
+#
+# Physical address where the kernel is loaded
+#
+config PHYSICAL_START
+ hex
+ default "0x200000"
+
+endmenu
+
+
+config VGA_CONSOLE
+ bool
+ default "y" if PC
+
+config SERIAL_CONSOLE
+ bool
+ default "y" if PC
+
+config RCAL0_CONSOLE
+ bool
+ default "y" if CRAY_XT
+ default "n"
+
+source "arch/x86_64/Kconfig.debug"
--- /dev/null
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config DEBUG_RODATA
+ bool "Write protect kernel read-only data structures"
+ depends on DEBUG_KERNEL
+ help
+ Mark the kernel read-only data as write-protected in the pagetables,
+ in order to catch accidental (and incorrect) writes to such const data.
+ This option may have a slight performance impact because a portion
+ of the kernel code won't be covered by a 2MB TLB anymore.
+ If in doubt, say "N".
+
+endmenu
--- /dev/null
+# x86_64/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+#
+# 19990713 Artur Skawina <skawina@geocities.com>
+# Added '-march' and '-mpreferred-stack-boundary' support
+# 20000913 Pavel Machek <pavel@suse.cz>
+# Converted for x86_64 architecture
+# 20010105 Andi Kleen, add IA32 compiler.
+# ....and later removed it again....
+# 20070816 Kevin Pedretti <ktpedre@sandia.gov>
+# Modifications for Kitten. Remove unneeded stuff.
+#
+
+LDFLAGS := -m elf_x86_64
+OBJCOPYFLAGS := -O binary -R .note -R .comment -S
+LDFLAGS_vmlinux :=
+CHECKFLAGS += -D__x86_64__ -m64
+
+cflags-y :=
+cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
+cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
+
+cflags-y += -m64
+cflags-y += -mno-red-zone
+cflags-y += -mcmodel=kernel
+cflags-y += -pipe
+cflags-y += -ffunction-sections
+# this makes reading the assembly source easier, but produces worse code
+# (it actually makes the kernel smaller too)
+cflags-y += -fno-reorder-blocks
+cflags-y += -Wno-sign-compare
+cflags-y += -Wno-unused-parameter
+# -funit-at-a-time shrinks the kernel .text considerably
+# unfortunately it makes reading oopses harder.
+cflags-y += $(call cc-option,-funit-at-a-time)
+# prevent gcc from generating any FP code by mistake
+cflags-y += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
+
+CFLAGS += $(cflags-y)
+AFLAGS += -m64
+
+head-y := arch/x86_64/kernel/head.o arch/x86_64/kernel/head64.o arch/x86_64/kernel/init_task.o
+
+libs-y += arch/x86_64/lib/
+core-y += arch/x86_64/kernel/ \
+ arch/x86_64/mm/
+
+boot := arch/x86_64/boot
+
+PHONY += bzImage archmrproper isoimage archclean
+
+#Default target when executing "make"
+all: bzImage
+
+BOOTIMAGE := arch/x86_64/boot/bzImage
+KBUILD_IMAGE := $(BOOTIMAGE)
+
+bzImage: vmlwk $(DEFAULT_EXTRA_TARGETS)
+ $(Q)$(MAKE) $(build)=$(boot) $(BOOTIMAGE)
+
+isoimage: vmlwk $(DEFAULT_EXTRA_TARGETS)
+ $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
+
+archclean:
+ $(Q)$(MAKE) $(clean)=$(boot)
+
+define archhelp
+ echo '* bzImage - Compressed kernel image (arch/$(ARCH)/boot/bzImage)'
+ echo ' isoimage - Create a boot CD-ROM image'
+endef
+
+CLEAN_FILES += arch/$(ARCH)/boot/image.iso
+
+
--- /dev/null
+#
+# arch/x86_64/boot/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+#
+# 2007 by Linus Torvalds
+#
+# 20070816 Kevin Pedretti <ktpedre@sandia.gov>
+# Modifications for Kitten. Remove unneeded stuff.
+#
+
+# ROOT_DEV specifies the default root-device when making the image.
+# This can be either FLOPPY, CURRENT, /dev/xxxx or empty, in which case
+# the default of FLOPPY is used by 'build'.
+
+ROOT_DEV := CURRENT
+
+targets := vmlwk.bin bootsect bootsect.o \
+ setup setup.o bzImage
+
+EXTRA_CFLAGS := -m32
+
+hostprogs-y := tools/build
+HOST_EXTRACFLAGS += $(LWKINCLUDE)
+subdir- := compressed/ #Let make clean descend into compressed/
+# ---------------------------------------------------------------------------
+
+$(obj)/bzImage: IMAGE_OFFSET := 0x100000
+$(obj)/bzImage: EXTRA_AFLAGS := -traditional -D__BIG_KERNEL__
+$(obj)/bzImage: BUILDFLAGS := -b
+
+quiet_cmd_image = BUILD $@
+cmd_image = $(obj)/tools/build $(BUILDFLAGS) $(obj)/bootsect $(obj)/setup \
+ $(obj)/vmlwk.bin $(ROOT_DEV) > $@
+
+$(obj)/bzImage: $(obj)/bootsect $(obj)/setup \
+ $(obj)/vmlwk.bin $(obj)/tools/build FORCE
+ $(call if_changed,image)
+ @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
+
+$(obj)/vmlwk.bin: $(obj)/compressed/vmlwk FORCE
+ $(call if_changed,objcopy)
+
+LDFLAGS_bootsect := -Ttext 0x0 -s --oformat binary
+LDFLAGS_setup := -Ttext 0x0 -s --oformat binary -e begtext
+
+$(obj)/setup $(obj)/bootsect: %: %.o FORCE
+ $(call if_changed,ld)
+
+$(obj)/compressed/vmlwk: FORCE
+ $(Q)$(MAKE) $(build)=$(obj)/compressed IMAGE_OFFSET=$(IMAGE_OFFSET) $@
+
+# Set this if you want to pass append arguments to the isoimage kernel
+FDARGS = console=vga,serial debug_mptable=1 init_argv="one two three" init_envp="one=1 two=2 three=3"
+# Set this if you want an initrd included with the isoimage kernel
+FDINITRD = init_task
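+# Both can also be overridden on the make command line, e.g. (illustrative):
+#   make isoimage FDARGS="console=serial" FDINITRD=init_task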
+
+image_cmdline = default lwk $(FDARGS) $(if $(FDINITRD),initrd=initrd.img,)
+
+# This requires being root or having syslinux 2.02 or higher installed
+isoimage: $(BOOTIMAGE) $(FDINITRD)
+ -rm -rf $(obj)/isoimage
+ mkdir $(obj)/isoimage
+ for i in lib lib64 share end ; do \
+ if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
+ cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
+ break ; \
+ fi ; \
+ if [ $$i = end ] ; then exit 1 ; fi ; \
+ done
+ cp $(BOOTIMAGE) $(obj)/isoimage/lwk
+ echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg
+ if [ -f '$(FDINITRD)' ] ; then \
+ cp '$(FDINITRD)' $(obj)/isoimage/initrd.img ; \
+ fi
+ mkisofs -J -r -o $(obj)/image.iso -b isolinux.bin -c boot.cat \
+ -no-emul-boot -boot-load-size 4 -boot-info-table \
+ $(obj)/isoimage
+ rm -rf $(obj)/isoimage
+
--- /dev/null
+/*
+ * bootsect.S Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * modified by Drew Eckhardt
+ * modified by Bruce Evans (bde)
+ * modified by Chris Noe (May 1999) (as86 -> gas)
+ * gutted by H. Peter Anvin (Jan 2003)
+ *
+ * BIG FAT NOTE: We're in real mode using 64k segments. Therefore segment
+ * addresses must be multiplied by 16 to obtain their respective linear
+ * addresses. To avoid confusion, linear addresses are written using leading
+ * hex while segment addresses are written as segment:offset.
+ *
+ */
+
+#include <arch/boot.h>
+
+SETUPSECTS = 4 /* default nr of setup-sectors */
+BOOTSEG = 0x07C0 /* original address of boot-sector */
+INITSEG = DEF_INITSEG /* we move boot here - out of the way */
+SETUPSEG = DEF_SETUPSEG /* setup starts here */
+SYSSEG = DEF_SYSSEG /* system loaded at 0x10000 (65536) */
+SYSSIZE = DEF_SYSSIZE /* system size: # of 16-byte clicks */
+ /* to be loaded */
+ROOT_DEV = 0 /* ROOT_DEV is now written by "build" */
+SWAP_DEV = 0 /* SWAP_DEV is now written by "build" */
+
+#define SVGA_MODE 0
+
+#ifndef RAMDISK
+#define RAMDISK 0
+#endif
+
+#ifndef ROOT_RDONLY
+#define ROOT_RDONLY 1
+#endif
+
+.code16
+.text
+
+.global _start
+_start:
+
+ # Normalize the start address
+ jmpl $BOOTSEG, $start2
+
+start2:
+ movw %cs, %ax
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %ss
+ movw $0x7c00, %sp
+ sti
+ cld
+
+ movw $bugger_off_msg, %si
+
+msg_loop:
+ lodsb
+ andb %al, %al
+ jz die
+ movb $0xe, %ah
+ movw $7, %bx
+ int $0x10
+ jmp msg_loop
+
+die:
+ # Allow the user to press a key, then reboot
+ xorw %ax, %ax
+ int $0x16
+ int $0x19
+
+ # int 0x19 should never return. In case it does anyway,
+ # invoke the BIOS reset code...
+ ljmp $0xf000,$0xfff0
+
+
+bugger_off_msg:
+ .ascii "Direct booting from floppy is no longer supported.\r\n"
+ .ascii "Please use a boot loader program instead.\r\n"
+ .ascii "\n"
+ .ascii "Remove disk and press any key to reboot . . .\r\n"
+ .byte 0
+
+
+ # Kernel attributes; used by setup
+
+ .org 497
+setup_sects: .byte SETUPSECTS
+root_flags: .word ROOT_RDONLY
+syssize: .word SYSSIZE
+swap_dev: .word SWAP_DEV
+ram_size: .word RAMDISK
+vid_mode: .word SVGA_MODE
+root_dev: .word ROOT_DEV
+boot_flag: .word 0xAA55
--- /dev/null
+#
+# lwk/arch/x86_64/boot/compressed/Makefile
+#
+# create a compressed vmlwk image from the original vmlwk
+#
+# Note all the files here are compiled/linked as 64bit executables.
+#
+
+targets := vmlwk vmlwk.bin vmlwk.bin.gz head.o misc.o piggy.o
+EXTRA_AFLAGS := -traditional
+
+# cannot use EXTRA_CFLAGS because the base CFLAGS contains -mcmodel=kernel,
+# which conflicts with the -mcmodel=small used here
+CFLAGS := -m64 -D__KERNEL__ -Iinclude -O2 -fno-strict-aliasing -fPIC -mcmodel=small -fno-builtin
+LDFLAGS := -m elf_x86_64
+
+LDFLAGS_vmlwk := -T
+$(obj)/vmlwk: $(src)/vmlwk.lds $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o FORCE
+ $(call if_changed,ld)
+ @:
+
+$(obj)/vmlwk.bin: vmlwk FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/vmlwk.bin.gz: $(obj)/vmlwk.bin FORCE
+ $(call if_changed,gzip)
+
+LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T
+
+$(obj)/piggy.o: $(obj)/vmlwk.scr $(obj)/vmlwk.bin.gz FORCE
+ $(call if_changed,ld)
--- /dev/null
+/*
+ * linux/boot/head.S
+ *
+ * Copyright (C) 1991, 1992, 1993 Linus Torvalds
+ */
+
+/*
+ * head.S contains the 32-bit startup code.
+ *
+ * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
+ * the page directory will exist. The startup code will be overwritten by
+ * the page directory. [According to comments etc elsewhere on a compressed
+ * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
+ *
+ * Page 0 is deliberately kept safe, since System Management Mode code in
+ * laptops may need to access the BIOS data stored there. This is also
+ * useful for future device drivers that access the BIOS via VM86
+ * mode.
+ */
+
+/*
+ * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
+ */
+.code32
+.text
+
+#include <lwk/linkage.h>
+#include <arch/segment.h>
+#include <arch/pgtable.h>
+#include <arch/page.h>
+#include <arch/msr.h>
+
+.section ".text.head"
+ .code32
+ .globl startup_32
+
+startup_32:
+ cld
+ cli
+ movl $(__KERNEL_DS), %eax
+ movl %eax, %ds
+ movl %eax, %es
+ movl %eax, %ss
+
+/* Calculate the delta between where we were compiled to run
+ * at and where we were actually loaded at. This can only be done
+ * with a short local call on x86. Nothing else will tell us what
+ * address we are running at. The reserved chunk of the real-mode
+ * data at 0x34-0x3f is used as the stack for this calculation.
+ * Only 4 bytes are needed.
+ */
+ leal 0x40(%esi), %esp
+ call 1f
+1: popl %ebp
+ subl $1b, %ebp
+
+/* setup a stack and make sure cpu supports long mode. */
+ movl $user_stack_end, %eax
+ addl %ebp, %eax
+ movl %eax, %esp
+
+ call verify_cpu
+ testl %eax, %eax
+ jnz no_longmode
+
+/* Compute the delta between where we were compiled to run at
+ * and where the code will actually run at.
+ */
+/* %ebp contains the address we are loaded at by the boot loader and %ebx
+ * contains the address where we should move the kernel image temporarily
+ * for safe in-place decompression.
+ */
+
+#ifdef CONFIG_RELOCATABLE
+ movl %ebp, %ebx
+ addl $(LARGE_PAGE_SIZE -1), %ebx
+ andl $LARGE_PAGE_MASK, %ebx
+#else
+ movl $CONFIG_PHYSICAL_START, %ebx
+#endif
+
+ /* Replace the compressed data size with the uncompressed size */
+ subl input_len(%ebp), %ebx
+ movl output_len(%ebp), %eax
+ addl %eax, %ebx
+ /* Add 8 bytes for every 32K input block */
+ shrl $12, %eax
+ addl %eax, %ebx
+ /* Add 32K + 18 bytes of extra slack and align on a 4K boundary */
+ addl $(32768 + 18 + 4095), %ebx
+ andl $~4095, %ebx
+
+/*
+ * Prepare for entering 64 bit mode
+ */
+
+ /* Load new GDT with the 64bit segments using 32bit descriptor */
+ leal gdt(%ebp), %eax
+ movl %eax, gdt+2(%ebp)
+ lgdt gdt(%ebp)
+
+ /* Enable PAE mode */
+ xorl %eax, %eax
+ orl $(1 << 5), %eax
+ movl %eax, %cr4
+
+ /*
+ * Build early 4G boot pagetable
+ */
+ /* Initialize Page tables to 0*/
+ leal pgtable(%ebx), %edi
+ xorl %eax, %eax
+ movl $((4096*6)/4), %ecx
+ rep stosl
+
+ /* Build Level 4 */
+ leal pgtable + 0(%ebx), %edi
+ leal 0x1007 (%edi), %eax
+ movl %eax, 0(%edi)
+
+ /* Build Level 3 */
+ leal pgtable + 0x1000(%ebx), %edi
+ leal 0x1007(%edi), %eax
+ movl $4, %ecx
+1: movl %eax, 0x00(%edi)
+ addl $0x00001000, %eax
+ addl $8, %edi
+ decl %ecx
+ jnz 1b
+
+ /* Build Level 2 */
+ leal pgtable + 0x2000(%ebx), %edi
+ movl $0x00000183, %eax
+ movl $2048, %ecx
+1: movl %eax, 0(%edi)
+ addl $0x00200000, %eax
+ addl $8, %edi
+ decl %ecx
+ jnz 1b
+
+ /* Enable the boot page tables */
+ leal pgtable(%ebx), %eax
+ movl %eax, %cr3
+
+ /* Enable Long mode in EFER (Extended Feature Enable Register) */
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_LME, %eax
+ wrmsr
+
+ /* Setup for the jump to 64bit mode
+ *
+ * When the jump is performed we will be in long mode but
+ * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
+ * (and in turn EFER.LMA = 1). To jump into 64bit mode we use
+ * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
+ * We place all of the values on our mini stack so lret can be
+ * used to perform that far jump.
+ */
+ pushl $__KERNEL_CS
+ leal startup_64(%ebp), %eax
+ pushl %eax
+
+ /* Enter paged protected Mode, activating Long Mode */
+ movl $0x80000001, %eax /* Enable Paging and Protected mode */
+ movl %eax, %cr0
+
+ /* Jump from 32bit compatibility mode into 64bit mode. */
+ lret
+
+no_longmode:
+ /* This isn't an x86-64 CPU so hang */
+1:
+ hlt
+ jmp 1b
+
+#include "../../kernel/verify_cpu.S"
+
+ /* Be careful here startup_64 needs to be at a predictable
+ * address so I can export it in an ELF header. Bootloaders
+ * should look at the ELF header to find this address, as
+ * it may change in the future.
+ */
+ .code64
+ .org 0x200
+ENTRY(startup_64)
+ /* We come here either from startup_32 or directly from a
+ * 64bit bootloader. If we come here from a bootloader we depend on
+ * an identity mapped page table being provided that maps our
+ * entire text+data+bss and hopefully all of memory.
+ */
+
+ /* Setup data segments. */
+ xorl %eax, %eax
+ movl %eax, %ds
+ movl %eax, %es
+ movl %eax, %ss
+
+ /* Compute the decompressed kernel start address. It is where
+ * we were loaded at aligned to a 2M boundary. %rbp contains the
+ * decompressed kernel start address.
+ *
+ * If it is a relocatable kernel then decompress and run the kernel
+ * from load address aligned to 2MB addr, otherwise decompress and
+ * run the kernel from CONFIG_PHYSICAL_START
+ */
+
+ /* Start with the delta to where the kernel will run at. */
+#ifdef CONFIG_RELOCATABLE
+ leaq startup_32(%rip) /* - $startup_32 */, %rbp
+ addq $(LARGE_PAGE_SIZE - 1), %rbp
+ andq $LARGE_PAGE_MASK, %rbp
+ movq %rbp, %rbx
+#else
+ movq $CONFIG_PHYSICAL_START, %rbp
+ movq %rbp, %rbx
+#endif
+
+ /* Replace the compressed data size with the uncompressed size */
+ movl input_len(%rip), %eax
+ subq %rax, %rbx
+ movl output_len(%rip), %eax
+ addq %rax, %rbx
+ /* Add 8 bytes for every 32K input block */
+ shrq $12, %rax
+ addq %rax, %rbx
+ /* Add 32K + 18 bytes of extra slack and align on a 4K boundary */
+ addq $(32768 + 18 + 4095), %rbx
+ andq $~4095, %rbx
+
+/* Copy the compressed kernel to the end of our buffer
+ * where decompression in place becomes safe.
+ */
+ leaq _end(%rip), %r8
+ leaq _end(%rbx), %r9
+ movq $_end /* - $startup_32 */, %rcx
+1: subq $8, %r8
+ subq $8, %r9
+ movq 0(%r8), %rax
+ movq %rax, 0(%r9)
+ subq $8, %rcx
+ jnz 1b
+
+/*
+ * Jump to the relocated address.
+ */
+ leaq relocated(%rbx), %rax
+ jmp *%rax
+
+.section ".text"
+relocated:
+
+/*
+ * Clear BSS
+ */
+ xorq %rax, %rax
+ leaq _edata(%rbx), %rdi
+ leaq _end(%rbx), %rcx
+ subq %rdi, %rcx
+ cld
+ rep
+ stosb
+
+ /* Setup the stack */
+ leaq user_stack_end(%rip), %rsp
+
+ /* zero EFLAGS after setting rsp */
+ pushq $0
+ popfq
+
+/*
+ * Do the decompression, and jump to the new kernel..
+ */
+ pushq %rsi # Save the real mode argument
+ movq %rsi, %rdi # real mode address
+ leaq _heap(%rip), %rsi # _heap
+ leaq input_data(%rip), %rdx # input_data
+ movl input_len(%rip), %eax
+ movq %rax, %rcx # input_len
+ movq %rbp, %r8 # output
+ call decompress_kernel
+ popq %rsi
+
+
+/*
+ * Jump to the decompressed kernel.
+ */
+ jmp *%rbp
+
+ .data
+gdt:
+ .word gdt_end - gdt
+ .long gdt
+ .word 0
+ .quad 0x0000000000000000 /* NULL descriptor */
+ .quad 0x00af9a000000ffff /* __KERNEL_CS */
+ .quad 0x00cf92000000ffff /* __KERNEL_DS */
+gdt_end:
+ .bss
+/* Stack for uncompression */
+ .balign 4
+user_stack:
+ .fill 4096,4,0
+user_stack_end:
--- /dev/null
+/*
+ * misc.c
+ *
+ * This is a collection of several routines from gzip-1.0.3
+ * adapted for Linux.
+ *
+ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ * puts by Nick Holloway 1993, better puts by Martin Mares 1995
+ * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
+ */
+
+#define _LWK_STRING_H 1
+#define _LWK_BITMAP_H 1
+
+#include <lwk/linkage.h>
+#include <lwk/screen_info.h>
+#include <arch/page.h>
+#include <arch/io.h>
+
+/* WARNING!!
+ * This code is compiled with -fPIC and it is relocated dynamically
+ * at run time, but no relocation processing is performed.
+ * This means that it is not safe to place pointers in static structures.
+ */
+
+/*
+ * Getting to provably safe in-place decompression is hard.
+ * Worst case behaviours need to be analyzed.
+ * Background information:
+ *
+ * The file layout is:
+ * magic[2]
+ * method[1]
+ * flags[1]
+ * timestamp[4]
+ * extraflags[1]
+ * os[1]
+ * compressed data blocks[N]
+ * crc[4] orig_len[4]
+ *
+ * resulting in 18 bytes of non compressed data overhead.
+ *
+ * Files divided into blocks
+ * 1 bit (last block flag)
+ * 2 bits (block type)
+ *
+ * 1 block occurs every 32K -1 bytes or when 50% compression has been achieved.
+ * The smallest block type encoding is always used.
+ *
+ * stored:
+ * 32 bits length in bytes.
+ *
+ * fixed:
+ * magic fixed tree.
+ * symbols.
+ *
+ * dynamic:
+ * dynamic tree encoding.
+ * symbols.
+ *
+ *
+ * The buffer for decompression in place is the length of the
+ * uncompressed data, plus a small amount extra to keep the algorithm safe.
+ * The compressed data is placed at the end of the buffer. The output
+ * pointer is placed at the start of the buffer and the input pointer
+ * is placed where the compressed data starts. Problems will occur
+ * when the output pointer overruns the input pointer.
+ *
+ * The output pointer can only overrun the input pointer if the input
+ * pointer is moving faster than the output pointer. A condition only
+ * triggered by data whose compressed form is larger than the uncompressed
+ * form.
+ *
+ * The worst case at the block level is a growth of the compressed data
+ * of 5 bytes per 32767 bytes.
+ *
+ * The worst case internal to a compressed block is very hard to figure.
+ * The worst case can at least be bounded by having one bit that represents
+ * 32764 bytes and then all of the rest of the bytes representing the very
+ * very last byte.
+ *
+ * All of which is enough to compute an amount of extra data that is required
+ * to be safe. To avoid problems at the block level allocating 5 extra bytes
+ * per 32767 bytes of data is sufficient. To avoid problems internal to a block,
+ * adding an extra 32767 bytes (the worst case uncompressed block size) is
+ * sufficient to ensure that in the worst case the decompressed data for a
+ * block will stop the byte before the compressed data for that block begins.
+ * To avoid problems with the compressed data's meta information an extra 18
+ * bytes are needed. Leading to the formula:
+ *
+ * extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size.
+ *
+ * Adding 8 bytes per 32K is a bit excessive but much easier to calculate.
+ * Adding 32768 instead of 32767 just makes for round numbers.
+ * Adding the decompressor_size is necessary as it must live after all
+ * of the data as well. Last I measured the decompressor is about 14K:
+ * 10K of actual data and 4K of bss. (A worked numeric example follows
+ * this comment block.)
+ *
+ */
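+
+/*
+ * Worked example (illustrative numbers only): for a 4 MB uncompressed
+ * kernel and a ~14K decompressor,
+ *
+ *   extra_bytes = (4*1024*1024 >> 12) + 32768 + 18 + 14336
+ *               = 1024 + 32768 + 18 + 14336 = 48146 bytes (~47K)
+ *
+ * of slack beyond the uncompressed size.
+ */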
+
+/*
+ * gzip declarations
+ */
+
+#define OF(args) args
+#define STATIC static
+
+#undef memset
+#undef memcpy
+#define memzero(s, n) memset ((s), 0, (n))
+
+typedef unsigned char uch;
+typedef unsigned short ush;
+typedef unsigned long ulg;
+
+#define WSIZE 0x80000000 /* Window size must be at least 32k,
+ * and a power of two
+ * We don't actually have a window just
+ * a huge output buffer so I report
+ * a 2G window size, as that should
+ * always be larger than our output buffer.
+ */
+
+static uch *inbuf; /* input buffer */
+static uch *window; /* Sliding window buffer, (and final output buffer) */
+
+static unsigned insize; /* valid bytes in inbuf */
+static unsigned inptr; /* index of next byte to be processed in inbuf */
+static unsigned outcnt; /* bytes in output buffer */
+
+/* gzip flag byte */
+#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */
+#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
+#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
+#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
+#define COMMENT 0x10 /* bit 4 set: file comment present */
+#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
+#define RESERVED 0xC0 /* bit 6,7: reserved */
+
+#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf())
+
+/* Diagnostic functions */
+#ifdef DEBUG
+# define Assert(cond,msg) {if(!(cond)) error(msg);}
+# define Trace(x) fprintf x
+# define Tracev(x) {if (verbose) fprintf x ;}
+# define Tracevv(x) {if (verbose>1) fprintf x ;}
+# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
+# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+#else
+# define Assert(cond,msg)
+# define Trace(x)
+# define Tracev(x)
+# define Tracevv(x)
+# define Tracec(c,x)
+# define Tracecv(c,x)
+#endif
+
+static int fill_inbuf(void);
+static void flush_window(void);
+static void error(char *m);
+static void gzip_mark(void **);
+static void gzip_release(void **);
+
+/*
+ * This is set up by the setup-routine at boot-time
+ */
+static unsigned char *real_mode; /* Pointer to real-mode data */
+
+#define RM_EXT_MEM_K (*(unsigned short *)(real_mode + 0x2))
+#ifndef STANDARD_MEMORY_BIOS_CALL
+#define RM_ALT_MEM_K (*(unsigned long *)(real_mode + 0x1e0))
+#endif
+#define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0))
+
+extern unsigned char input_data[];
+extern int input_len;
+
+static long bytes_out = 0;
+
+static void *malloc(int size);
+static void free(void *where);
+
+static void *memset(void *s, int c, unsigned n);
+static void *memcpy(void *dest, const void *src, unsigned n);
+
+static void putstr(const char *);
+
+static long free_mem_ptr;
+static long free_mem_end_ptr;
+
+#define HEAP_SIZE 0x7000
+
+static char *vidmem = (char *)0xb8000;
+static int vidport;
+static int lines, cols;
+
+#include "../../../../lib/inflate.c"
+
+static void *malloc(int size)
+{
+ void *p;
+
+ if (size <0) error("Malloc error");
+ if (free_mem_ptr <= 0) error("Memory error");
+
+ free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */
+
+ p = (void *)free_mem_ptr;
+ free_mem_ptr += size;
+
+ if (free_mem_ptr >= free_mem_end_ptr)
+ error("Out of memory");
+
+ return p;
+}
+
+static void free(void *where)
+{ /* Don't care */
+}
+
+static void gzip_mark(void **ptr)
+{
+ *ptr = (void *) free_mem_ptr;
+}
+
+static void gzip_release(void **ptr)
+{
+ free_mem_ptr = (long) *ptr;
+}
+
+static void scroll(void)
+{
+ int i;
+
+ memcpy ( vidmem, vidmem + cols * 2, ( lines - 1 ) * cols * 2 );
+ for ( i = ( lines - 1 ) * cols * 2; i < lines * cols * 2; i += 2 )
+ vidmem[i] = ' ';
+}
+
+static void putstr(const char *s)
+{
+ int x,y,pos;
+ char c;
+
+ x = RM_SCREEN_INFO.orig_x;
+ y = RM_SCREEN_INFO.orig_y;
+
+ while ( ( c = *s++ ) != '\0' ) {
+ if ( c == '\n' ) {
+ x = 0;
+ if ( ++y >= lines ) {
+ scroll();
+ y--;
+ }
+ } else {
+ vidmem [ ( x + cols * y ) * 2 ] = c;
+ if ( ++x >= cols ) {
+ x = 0;
+ if ( ++y >= lines ) {
+ scroll();
+ y--;
+ }
+ }
+ }
+ }
+
+ RM_SCREEN_INFO.orig_x = x;
+ RM_SCREEN_INFO.orig_y = y;
+
+ pos = (x + cols * y) * 2; /* Update cursor position */
+ outb_p(14, vidport);
+ outb_p(0xff & (pos >> 9), vidport+1);
+ outb_p(15, vidport);
+ outb_p(0xff & (pos >> 1), vidport+1);
+}
+
+static void* memset(void* s, int c, unsigned n)
+{
+ int i;
+ char *ss = (char*)s;
+
+ for (i=0;i<n;i++) ss[i] = c;
+ return s;
+}
+
+static void* memcpy(void* dest, const void* src, unsigned n)
+{
+ int i;
+ char *d = (char *)dest, *s = (char *)src;
+
+ for (i=0;i<n;i++) d[i] = s[i];
+ return dest;
+}
+
+/* ===========================================================================
+ * Fill the input buffer. This is called only when the buffer is empty
+ * and at least one byte is really needed.
+ */
+static int fill_inbuf(void)
+{
+ error("ran out of input data");
+ return 0;
+}
+
+/* ===========================================================================
+ * Write the output window window[0..outcnt-1] and update crc and bytes_out.
+ * (Used for the decompressed data only.)
+ */
+static void flush_window(void)
+{
+ /* With my window equal to my output buffer
+ * I only need to compute the crc here.
+ */
+ ulg c = crc; /* temporary variable */
+ unsigned n;
+ uch *in, ch;
+
+ in = window;
+ for (n = 0; n < outcnt; n++) {
+ ch = *in++;
+ c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
+ }
+ crc = c;
+ bytes_out += (ulg)outcnt;
+ outcnt = 0;
+}
+
+static void error(char *x)
+{
+ putstr("\n\n");
+ putstr(x);
+ putstr("\n\n -- System halted");
+
+ while(1); /* Halt */
+}
+
+asmlinkage void decompress_kernel(void *rmode, unsigned long heap,
+ uch *input_data, unsigned long input_len, uch *output)
+{
+ real_mode = rmode;
+
+ if (RM_SCREEN_INFO.orig_video_mode == 7) {
+ vidmem = (char *) 0xb0000;
+ vidport = 0x3b4;
+ } else {
+ vidmem = (char *) 0xb8000;
+ vidport = 0x3d4;
+ }
+
+ lines = RM_SCREEN_INFO.orig_video_lines;
+ cols = RM_SCREEN_INFO.orig_video_cols;
+
+ window = output; /* Output buffer (Normally at 1M) */
+ free_mem_ptr = heap; /* Heap */
+ free_mem_end_ptr = heap + HEAP_SIZE;
+ inbuf = input_data; /* Input buffer */
+ insize = input_len;
+ inptr = 0;
+
+ if ((ulg)output & (__KERNEL_ALIGN - 1))
+ error("Destination address not 2M aligned");
+ if ((ulg)output >= 0xffffffffffUL)
+ error("Destination address too large");
+
+ makecrc();
+ putstr(".\nDecompressing LWK...");
+ gunzip();
+ putstr("done.\nBooting the kernel.\n");
+ return;
+}
--- /dev/null
+OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
+OUTPUT_ARCH(i386:x86-64)
+ENTRY(startup_64)
+SECTIONS
+{
+ /* Be careful parts of head.S assume startup_32 is at
+ * address 0.
+ */
+ . = 0;
+ .text : {
+ _head = . ;
+ *(.text.head)
+ _ehead = . ;
+ *(.text.compressed)
+ _text = .; /* Text */
+ *(.text)
+ *(.text.*)
+ _etext = . ;
+ }
+ .rodata : {
+ _rodata = . ;
+ *(.rodata) /* read-only data */
+ *(.rodata.*)
+ _erodata = . ;
+ }
+ .data : {
+ _data = . ;
+ *(.data)
+ *(.data.*)
+ _edata = . ;
+ }
+ .bss : {
+ _bss = . ;
+ *(.bss)
+ *(.bss.*)
+ *(COMMON)
+ . = ALIGN(8);
+ _end = . ;
+ . = ALIGN(4096);
+ pgtable = . ;
+ . = . + 4096 * 6;
+ _heap = .;
+ }
+}
--- /dev/null
+SECTIONS
+{
+ .text.compressed : {
+ input_len = .;
+ LONG(input_data_end - input_data) input_data = .;
+ *(.data)
+ output_len = . - 4;
+ input_data_end = .;
+ }
+}
--- /dev/null
+/*
+ * setup.S Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * setup.s is responsible for getting the system data from the BIOS,
+ * and putting them into the appropriate places in system memory.
+ * both setup.s and system has been loaded by the bootblock.
+ *
+ * This code asks the bios for memory/disk/other parameters, and
+ * puts them in a "safe" place: 0x90000-0x901FF, ie where the
+ * boot-block used to be. It is then up to the protected mode
+ * system to read them from there before the area is overwritten
+ * for buffer-blocks.
+ *
+ * Move PS/2 aux init code to psaux.c
+ * (troyer@saifr00.cfsat.Honeywell.COM) 03Oct92
+ *
+ * some changes and additional features by Christoph Niemann,
+ * March 1993/June 1994 (Christoph.Niemann@linux.org)
+ *
+ * add APM BIOS checking by Stephen Rothwell, May 1994
+ * (sfr@canb.auug.org.au)
+ *
+ * High load stuff, initrd support and position independency
+ * by Hans Lermen & Werner Almesberger, February 1996
+ * <lermen@elserv.ffm.fgan.de>, <almesber@lrc.epfl.ch>
+ *
+ * Video handling moved to video.S by Martin Mares, March 1996
+ * <mj@k332.feld.cvut.cz>
+ *
+ * Extended memory detection scheme retwiddled by orc@pell.chi.il.us (david
+ * parsons) to avoid loadlin confusion, July 1997
+ *
+ * Transcribed from Intel (as86) -> AT&T (gas) by Chris Noe, May 1999.
+ * <stiker@northlink.com>
+ *
+ * Fix to work around buggy BIOSes which don't use carry bit correctly
+ * and/or report extended memory in CX/DX for e801h memory size detection
+ * call. As a result the kernel got wrong figures. The int15/e801h docs
+ * from Ralf Brown interrupt list seem to indicate AX/BX should be used
+ * anyway. So to avoid breaking many machines (presumably there was a reason
+ * to originally use CX/DX instead of AX/BX), we do a kludge to see
+ * if CX/DX have been changed in the e801 call and if so use AX/BX .
+ * Michael Miller, April 2001 <michaelm@mjmm.org>
+ *
+ * Added long mode checking and SSE force. March 2003, Andi Kleen.
+ */
+
+#include <arch/segment.h>
+#include <lwk/version.h>
+#include <lwk/compile.h>
+#include <lwk/init.h>
+#include <arch/boot.h>
+#include <arch/e820.h>
+#include <arch/page.h>
+
+/* Signature words to ensure LILO loaded us right */
+#define SIG1 0xAA55
+#define SIG2 0x5A5A
+
+INITSEG = DEF_INITSEG # 0x9000, we move boot here, out of the way
+SYSSEG = DEF_SYSSEG # 0x1000, system loaded at 0x10000 (65536).
+SETUPSEG = DEF_SETUPSEG # 0x9020, this is the current segment
+ # ... and the former contents of CS
+
+DELTA_INITSEG = SETUPSEG - INITSEG # 0x0020
+
+.code16
+.globl begtext, begdata, begbss, endtext, enddata, endbss
+
+.text
+begtext:
+.data
+begdata:
+.bss
+begbss:
+.text
+
+start:
+ jmp trampoline
+
+# This is the setup header, and it must start at %cs:2 (old 0x9020:2)
+
+ .ascii "HdrS" # header signature
+ .word 0x0206 # header version number (>= 0x0105
+ # or else old loadlin-1.5 will fail)
+realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
+start_sys_seg: .word SYSSEG
+ .word kernel_version # pointing to kernel version string
+ # above section of header is compatible
+ # with loadlin-1.5 (header v1.5). Don't
+ # change it.
+
+type_of_loader: .byte 0 # = 0, old one (LILO, Loadlin,
+ # Bootlin, SYSLX, bootsect...)
+ # See Documentation/i386/boot.txt for
+ # assigned ids
+
+# flags, unused bits must be zero (RFU) bit within loadflags
+loadflags:
+LOADED_HIGH = 1 # If set, the kernel is loaded high
+CAN_USE_HEAP = 0x80 # If set, the loader also has set
+ # heap_end_ptr to tell how much
+ # space behind setup.S can be used for
+ # heap purposes.
+ # Only the loader knows what is free
+#ifndef __BIG_KERNEL__
+ .byte 0
+#else
+ .byte LOADED_HIGH
+#endif
+
+setup_move_size: .word 0x8000 # size to move, when setup is not
+ # loaded at 0x90000. We will move setup
+ # to 0x90000 then just before jumping
+ # into the kernel. However, only the
+ # loader knows how much data behind
+ # us also needs to be loaded.
+
+code32_start: # here loaders can put a different
+ # start address for 32-bit code.
+#ifndef __BIG_KERNEL__
+ .long 0x1000 # 0x1000 = default for zImage
+#else
+ .long 0x100000 # 0x100000 = default for big kernel
+#endif
+
+ramdisk_image: .long 0 # address of loaded ramdisk image
+ # Here the loader puts the 32-bit
+ # address where it loaded the image.
+ # This only will be read by the kernel.
+
+ramdisk_size: .long 0 # its size in bytes
+
+bootsect_kludge:
+ .long 0 # obsolete
+
+heap_end_ptr: .word modelist+1024 # (Header version 0x0201 or later)
+ # space from here (exclusive) down to
+ # end of setup code can be used by setup
+ # for local heap purposes.
+
+pad1: .word 0
+cmd_line_ptr: .long 0 # (Header version 0x0202 or later)
+ # If nonzero, a 32-bit pointer
+ # to the kernel command line.
+ # The command line should be
+ # located between the start of
+ # setup and the end of low
+ # memory (0xa0000), or it may
+ # get overwritten before it
+ # gets read. If this field is
+ # used, there is no longer
+ # anything magical about the
+ # 0x90000 segment; the setup
+ # can be located anywhere in
+ # low memory 0x10000 or higher.
+
+ramdisk_max: .long 0xffffffff
+kernel_alignment: .long 0x200000 # physical addr alignment required for
+ # protected mode relocatable kernel
+#ifdef CONFIG_RELOCATABLE
+relocatable_kernel: .byte 1
+#else
+relocatable_kernel: .byte 0
+#endif
+pad2: .byte 0
+pad3: .word 0
+
+cmdline_size: .long COMMAND_LINE_SIZE-1 #length of the command line,
+ #added with boot protocol
+ #version 2.06
+
+trampoline: call start_of_setup
+ .align 16
+ # The offset at this point is 0x240
+ .space (0xeff-0x240+1) # E820 & EDD space (ending at 0xeff)
+# End of setup header #####################################################
+
+start_of_setup:
+# Bootlin depends on this being done early
+ movw $0x01500, %ax
+ movb $0x81, %dl
+ int $0x13
+
+#ifdef SAFE_RESET_DISK_CONTROLLER
+# Reset the disk controller.
+ movw $0x0000, %ax
+ movb $0x80, %dl
+ int $0x13
+#endif
+
+# Set %ds = %cs, we know that SETUPSEG = %cs at this point
+ movw %cs, %ax # aka SETUPSEG
+ movw %ax, %ds
+# Check signature at end of setup
+ cmpw $SIG1, setup_sig1
+ jne bad_sig
+
+ cmpw $SIG2, setup_sig2
+ jne bad_sig
+
+ jmp good_sig1
+
+# Routine to print asciiz string at ds:si
+prtstr:
+ lodsb
+ andb %al, %al
+ jz fin
+
+ call prtchr
+ jmp prtstr
+
+fin: ret
+
+# Space printing
+prtsp2: call prtspc # Print double space
+prtspc: movb $0x20, %al # Print single space (note: fall-thru)
+
+prtchr:
+ pushw %ax
+ pushw %cx
+ movw $0007,%bx
+ movw $0x01, %cx
+ movb $0x0e, %ah
+ int $0x10
+ popw %cx
+ popw %ax
+ ret
+
+beep: movb $0x07, %al
+ jmp prtchr
+
+no_sig_mess: .string "No setup signature found ..."
+
+good_sig1:
+ jmp good_sig
+
+# We now have to find the rest of the setup code/data
+bad_sig:
+ movw %cs, %ax # SETUPSEG
+ subw $DELTA_INITSEG, %ax # INITSEG
+ movw %ax, %ds
+ xorb %bh, %bh
+ movb (497), %bl # get setup sect from bootsect
+ subw $4, %bx # LILO loads 4 sectors of setup
+ shlw $8, %bx # convert to words (1sect=2^8 words)
+ movw %bx, %cx
+ shrw $3, %bx # convert to segment
+ addw $SYSSEG, %bx
+ movw %bx, %cs:start_sys_seg
+# Move rest of setup code/data to here
+ movw $2048, %di # four sectors loaded by LILO
+ subw %si, %si
+ movw %cs, %ax # aka SETUPSEG
+ movw %ax, %es
+ movw $SYSSEG, %ax
+ movw %ax, %ds
+ rep
+ movsw
+ movw %cs, %ax # aka SETUPSEG
+ movw %ax, %ds
+ cmpw $SIG1, setup_sig1
+ jne no_sig
+
+ cmpw $SIG2, setup_sig2
+ jne no_sig
+
+ jmp good_sig
+
+no_sig:
+ lea no_sig_mess, %si
+ call prtstr
+
+no_sig_loop:
+ jmp no_sig_loop
+
+good_sig:
+ movw %cs, %ax # aka SETUPSEG
+ subw $DELTA_INITSEG, %ax # aka INITSEG
+ movw %ax, %ds
+# Check if an old loader tries to load a big-kernel
+ testb $LOADED_HIGH, %cs:loadflags # Do we have a big kernel?
+ jz loader_ok # No, no danger for old loaders.
+
+ cmpb $0, %cs:type_of_loader # Do we have a loader that
+ # can deal with us?
+ jnz loader_ok # Yes, continue.
+
+ pushw %cs # No, we have an old loader,
+ popw %ds # die.
+ lea loader_panic_mess, %si
+ call prtstr
+
+ jmp no_sig_loop
+
+loader_panic_mess: .string "Wrong loader, giving up..."
+
+loader_ok:
+ /* check for long mode. */
+ /* we have to do this before the VESA setup, otherwise the user
+ can't see the error message. */
+
+ pushw %ds
+ movw %cs,%ax
+ movw %ax,%ds
+
+ call verify_cpu
+ testl %eax,%eax
+ jz sse_ok
+
+no_longmode:
+ call beep
+ lea long_mode_panic,%si
+ call prtstr
+no_longmode_loop:
+ jmp no_longmode_loop
+long_mode_panic:
+ .string "BOOT FAILURE: This system does not support x86_64 long mode."
+ .byte 0
+
+#include "../kernel/verify_cpu.S"
+sse_ok:
+ popw %ds
+
+# tell BIOS we want to go to long mode
+ movl $0xec00,%eax # declare target operating mode
+ movl $2,%ebx # long mode
+ int $0x15
+
+# Get memory size (extended mem, kB)
+
+ xorl %eax, %eax
+ movl %eax, (0x1e0)
+#ifndef STANDARD_MEMORY_BIOS_CALL
+ movb %al, (E820NR)
+# Try three different memory detection schemes. First, try
+# e820h, which lets us assemble a memory map, then try e801h,
+# which returns a 32-bit memory size, and finally 88h, which
+# returns 0-64m
+
+# method E820H:
+# the memory map from hell. e820h returns memory classified into
+# a whole bunch of different types, and allows memory holes and
+# everything. We scan through this memory map and build a list
+# of the first 32 memory areas, which we return at [E820MAP].
+# This is documented at http://www.acpi.info/, in the ACPI 2.0 specification.
+
+#define SMAP 0x534d4150
+
+meme820:
+ xorl %ebx, %ebx # continuation counter
+ movw $E820MAP, %di # point into the whitelist
+ # so we can have the bios
+ # directly write into it.
+
+jmpe820:
+ movl $0x0000e820, %eax # e820, upper word zeroed
+ movl $SMAP, %edx # ascii 'SMAP'
+ movl $20, %ecx # size of the e820rec
+ pushw %ds # data record.
+ popw %es
+ int $0x15 # make the call
+ jc bail820 # fall to e801 if it fails
+
+ cmpl $SMAP, %eax # check the return is `SMAP'
+ jne bail820 # fall to e801 if it fails
+
+# cmpl $1, 16(%di) # is this usable memory?
+# jne again820
+
+ # If this is usable memory, we save it by simply advancing %di by
+ # sizeof(e820rec).
+ #
+good820:
+ movb (E820NR), %al # up to 128 entries
+ cmpb $E820MAX, %al
+ jae bail820
+
+ incb (E820NR)
+ movw %di, %ax
+ addw $20, %ax
+ movw %ax, %di
+again820:
+ cmpl $0, %ebx # check to see if
+ jne jmpe820 # %ebx is set to EOF
+bail820:
+
+
+# method E801H:
+# memory size is in 1k chunksizes, to avoid confusing loadlin.
+# we store the 0xe801 memory size in a completely different place,
+# because it will most likely be longer than 16 bits.
+# (use 1e0 because that's what Larry Augustine uses in his
+# alternative new memory detection scheme, and it's sensible
+# to write everything into the same place.)
+
+meme801:
+ stc # fix to work around buggy
+ xorw %cx,%cx # BIOSes which don't clear/set
+ xorw %dx,%dx # carry on pass/error of
+ # e801h memory size call
+ # or merely pass cx,dx though
+ # without changing them.
+ movw $0xe801, %ax
+ int $0x15
+ jc mem88
+
+ cmpw $0x0, %cx # Kludge to handle BIOSes
+ jne e801usecxdx # which report their extended
+ cmpw $0x0, %dx # memory in AX/BX rather than
+ jne e801usecxdx # CX/DX. The spec I have read
+ movw %ax, %cx # seems to indicate AX/BX
+ movw %bx, %dx # are more reasonable anyway...
+
+e801usecxdx:
+ andl $0xffff, %edx # clear sign extend
+ shll $6, %edx # and go from 64k to 1k chunks
+ movl %edx, (0x1e0) # store extended memory size
+ andl $0xffff, %ecx # clear sign extend
+ addl %ecx, (0x1e0) # and add lower memory into
+ # total size.
+
+# Ye Olde Traditional Methode. Returns the memory size (up to 16mb or
+# 64mb, depending on the bios) in ax.
+mem88:
+
+#endif
+ movb $0x88, %ah
+ int $0x15
+ movw %ax, (2)
+
+# Set the keyboard repeat rate to the max
+ movw $0x0305, %ax
+ xorw %bx, %bx
+ int $0x16
+
+# Check for video adapter and its parameters and allow the
+# user to browse video modes.
+ call video # NOTE: we need %ds pointing
+ # to bootsector
+
+# Get hd0 data...
+ xorw %ax, %ax
+ movw %ax, %ds
+ ldsw (4 * 0x41), %si
+ movw %cs, %ax # aka SETUPSEG
+ subw $DELTA_INITSEG, %ax # aka INITSEG
+ pushw %ax
+ movw %ax, %es
+ movw $0x0080, %di
+ movw $0x10, %cx
+ pushw %cx
+ cld
+ rep
+ movsb
+# Get hd1 data...
+ xorw %ax, %ax
+ movw %ax, %ds
+ ldsw (4 * 0x46), %si
+ popw %cx
+ popw %es
+ movw $0x0090, %di
+ rep
+ movsb
+# Check that there IS a hd1 :-)
+ movw $0x01500, %ax
+ movb $0x81, %dl
+ int $0x13
+ jc no_disk1
+
+ cmpb $3, %ah
+ je is_disk1
+
+no_disk1:
+ movw %cs, %ax # aka SETUPSEG
+ subw $DELTA_INITSEG, %ax # aka INITSEG
+ movw %ax, %es
+ movw $0x0090, %di
+ movw $0x10, %cx
+ xorw %ax, %ax
+ cld
+ rep
+ stosb
+is_disk1:
+
+# Check for PS/2 pointing device
+ movw %cs, %ax # aka SETUPSEG
+ subw $DELTA_INITSEG, %ax # aka INITSEG
+ movw %ax, %ds
+ movb $0, (0x1ff) # default is no pointing device
+ int $0x11 # int 0x11: equipment list
+ testb $0x04, %al # check if mouse installed
+ jz no_psmouse
+
+ movb $0xAA, (0x1ff) # device present
+no_psmouse:
+
+# Now we want to move to protected mode ...
+ cmpw $0, %cs:realmode_swtch
+ jz rmodeswtch_normal
+
+ lcall *%cs:realmode_swtch
+
+ jmp rmodeswtch_end
+
+rmodeswtch_normal:
+ pushw %cs
+ call default_switch
+
+rmodeswtch_end:
+# we get the code32 start address and modify the below 'jmpi'
+# (loader may have changed it)
+ movl %cs:code32_start, %eax
+ movl %eax, %cs:code32
+
+# Now we move the system to its rightful place ... but we check if we have a
+# big-kernel. In that case we *must* not move it ...
+ testb $LOADED_HIGH, %cs:loadflags
+ jz do_move0 # .. then we have a normal low
+ # loaded zImage
+ # .. or else we have a high
+ # loaded bzImage
+ jmp end_move # ... and we skip moving
+
+do_move0:
+ movw $0x100, %ax # start of destination segment
+ movw %cs, %bp # aka SETUPSEG
+ subw $DELTA_INITSEG, %bp # aka INITSEG
+ movw %cs:start_sys_seg, %bx # start of source segment
+ cld
+do_move:
+ movw %ax, %es # destination segment
+ incb %ah # instead of add ax,#0x100
+ movw %bx, %ds # source segment
+ addw $0x100, %bx
+ subw %di, %di
+ subw %si, %si
+ movw $0x800, %cx
+ rep
+ movsw
+ cmpw %bp, %bx # assume start_sys_seg > 0x200,
+ # so we will perhaps read one
+ # page more than needed, but
+ # never overwrite INITSEG
+ # because destination is a
+ # minimum one page below source
+ jb do_move
+
+end_move:
+# then we load the segment descriptors
+ movw %cs, %ax # aka SETUPSEG
+ movw %ax, %ds
+
+# Check whether we need to be downward compatible with version <=201
+ cmpl $0, cmd_line_ptr
+ jne end_move_self # loader uses version >=202 features
+ cmpb $0x20, type_of_loader
+ je end_move_self # bootsect loader, we know of it
+
+# Boot loader doesn't support boot protocol version 2.02.
+# If we have our code not at 0x90000, we need to move it there now.
+# We also then need to move the params behind it (commandline)
+# Because we would overwrite the code on the current IP, we move
+# it in two steps, jumping high after the first one.
+ movw %cs, %ax
+ cmpw $SETUPSEG, %ax
+ je end_move_self
+
+ cli # make sure we really have
+ # interrupts disabled !
+ # because after this the stack
+ # should not be used
+ subw $DELTA_INITSEG, %ax # aka INITSEG
+ movw %ss, %dx
+ cmpw %ax, %dx
+ jb move_self_1
+
+ addw $INITSEG, %dx
+ subw %ax, %dx # this will go into %ss after
+ # the move
+move_self_1:
+ movw %ax, %ds
+ movw $INITSEG, %ax # real INITSEG
+ movw %ax, %es
+ movw %cs:setup_move_size, %cx
+ std # we have to move up, so we use
+ # direction down because the
+ # areas may overlap
+ movw %cx, %di
+ decw %di
+ movw %di, %si
+ subw $move_self_here+0x200, %cx
+ rep
+ movsb
+ ljmp $SETUPSEG, $move_self_here
+
+move_self_here:
+ movw $move_self_here+0x200, %cx
+ rep
+ movsb
+ movw $SETUPSEG, %ax
+ movw %ax, %ds
+ movw %dx, %ss
+end_move_self: # now we are at the right place
+ lidt idt_48 # load idt with 0,0
+ xorl %eax, %eax # Compute gdt_base
+ movw %ds, %ax # (Convert %ds:gdt to a linear ptr)
+ shll $4, %eax
+ addl $gdt, %eax
+ movl %eax, (gdt_48+2)
+ lgdt gdt_48 # load gdt with whatever is
+ # appropriate
+
+# that was painless, now we enable a20
+ call empty_8042
+
+ movb $0xD1, %al # command write
+ outb %al, $0x64
+ call empty_8042
+
+ movb $0xDF, %al # A20 on
+ outb %al, $0x60
+ call empty_8042
+
+#
+# You must preserve the other bits here. Otherwise embarrassing things
+# like laptops powering off on boot happen. Corrected version by Kira
+# Brown from Linux 2.2
+#
+ inb $0x92, %al #
+ orb $02, %al # "fast A20" version
+ outb %al, $0x92 # some chips have only this
+
+# wait until a20 really *is* enabled; it can take a fair amount of
+# time on certain systems; Toshiba Tecras are known to have this
+# problem. The memory location used here (0x200) is the int 0x80
+# vector, which should be safe to use.
+
+ xorw %ax, %ax # segment 0x0000
+ movw %ax, %fs
+ decw %ax # segment 0xffff (HMA)
+ movw %ax, %gs
+a20_wait:
+ incw %ax # unused memory location <0xfff0
+ movw %ax, %fs:(0x200) # we use the "int 0x80" vector
+ cmpw %gs:(0x210), %ax # and its corresponding HMA addr
+ je a20_wait # loop until no longer aliased
+
+# make sure any possible coprocessor is properly reset..
+ xorw %ax, %ax
+ outb %al, $0xf0
+ call delay
+
+ outb %al, $0xf1
+ call delay
+
+# well, that went ok, I hope. Now we mask all interrupts - the rest
+# is done in init_IRQ().
+ movb $0xFF, %al # mask all interrupts for now
+ outb %al, $0xA1
+ call delay
+
+ movb $0xFB, %al # mask all irq's but irq2 which
+ outb %al, $0x21 # is cascaded
+
+# Well, that certainly wasn't fun :-(. Hopefully it works, and we don't
+# need no steenking BIOS anyway (except for the initial loading :-).
+# The BIOS-routine wants lots of unnecessary data, and it's less
+# "interesting" anyway. This is how REAL programmers do it.
+#
+# Well, now's the time to actually move into protected mode. To make
+# things as simple as possible, we do no register set-up or anything,
+# we let the gnu-compiled 32-bit programs do that. We just jump to
+# absolute address 0x1000 (or the loader supplied one),
+# in 32-bit protected mode.
+#
+# Note that the short jump isn't strictly needed, although there are
+# reasons why it might be a good idea. It won't hurt in any case.
+ movw $1, %ax # protected mode (PE) bit
+ lmsw %ax # This is it!
+ jmp flush_instr
+
+flush_instr:
+ xorw %bx, %bx # Flag to indicate a boot
+ xorl %esi, %esi # Pointer to real-mode code
+ movw %cs, %si
+ subw $DELTA_INITSEG, %si
+ shll $4, %esi # Convert to 32-bit pointer
+# NOTE: For high loaded big kernels we need a
+# jmpi 0x100000,__KERNEL_CS
+#
+# but we haven't yet reloaded the CS register, so the default size
+# of the target offset is still 16 bits.
+# However, using an operand prefix (0x66), the CPU will properly
+# take our 48 bit far pointer. (INTeL 80386 Programmer's Reference
+# Manual, Mixing 16-bit and 32-bit code, page 16-6)
+
+ .byte 0x66, 0xea # prefix + jmpi-opcode
+code32: .long 0x1000 # will be set to 0x100000
+ # for big kernels
+ .word __KERNEL_CS
+
+# Here's a bunch of information about your current kernel..
+kernel_version: .ascii UTS_RELEASE
+ .ascii " ("
+ .ascii LWK_COMPILE_BY
+ .ascii "@"
+ .ascii LWK_COMPILE_HOST
+ .ascii ") "
+ .ascii UTS_VERSION
+ .byte 0
+
+# This is the default real mode switch routine.
+# to be called just before protected mode transition
+default_switch:
+ cli # no interrupts allowed !
+ movb $0x80, %al # disable NMI for bootup
+ # sequence
+ outb %al, $0x70
+ lret
+
+
+# This routine checks that the keyboard command queue is empty
+# (after emptying the output buffers)
+#
+# Some machines have delusions that the keyboard buffer is always full
+# with no keyboard attached...
+#
+# If there is no keyboard controller, we will usually get 0xff
+# to all the reads. With each IO taking a microsecond and
+# a timeout of 100,000 iterations, this can take about half a
+# second ("delay" == outb to port 0x80). That should be ok,
+# and should also be plenty of time for a real keyboard controller
+# to empty.
+#
+
+empty_8042:
+ pushl %ecx
+ movl $100000, %ecx
+
+empty_8042_loop:
+ decl %ecx
+ jz empty_8042_end_loop
+
+ call delay
+
+ inb $0x64, %al # 8042 status port
+ testb $1, %al # output buffer?
+ jz no_output
+
+ call delay
+ inb $0x60, %al # read it
+ jmp empty_8042_loop
+
+no_output:
+ testb $2, %al # is input buffer full?
+ jnz empty_8042_loop # yes - loop
+empty_8042_end_loop:
+ popl %ecx
+ ret
+
+# Read the CMOS clock. Return the seconds in al (converted from BCD to binary)
+gettime:
+ pushw %cx
+ movb $0x02, %ah # BIOS int 0x1a, ah=2: read real-time clock
+ int $0x1a
+ movb %dh, %al # %dh contains the seconds (BCD)
+ andb $0x0f, %al # keep the units digit
+ movb %dh, %ah
+ movb $0x04, %cl
+ shrb %cl, %ah # tens digit into %ah
+ aad # %al = %ah*10 + %al (binary seconds)
+ popw %cx
+ ret
+
+# Delay is needed after doing I/O
+delay:
+ outb %al,$0x80
+ ret
+
+# Descriptor tables
+gdt:
+ .word 0, 0, 0, 0 # dummy
+
+ .word 0, 0, 0, 0 # unused
+
+ .word 0xFFFF # 4Gb - (0x100000*0x1000 = 4Gb)
+ .word 0 # base address = 0
+ .word 0x9A00 # code read/exec
+ .word 0x00CF # granularity = 4096, 386
+ # (+5th nibble of limit)
+
+ .word 0xFFFF # 4Gb - (0x100000*0x1000 = 4Gb)
+ .word 0 # base address = 0
+ .word 0x9200 # data read/write
+ .word 0x00CF # granularity = 4096, 386
+ # (+5th nibble of limit)
+gdt_end:
+idt_48:
+ .word 0 # idt limit = 0
+ .word 0, 0 # idt base = 0L
+gdt_48:
+ .word gdt_end-gdt-1 # gdt limit
+ .word 0, 0 # gdt base (filled in later)
+
+# Include video setup & detection code
+
+#include "video.S"
+
+# Setup signature -- must be last
+setup_sig1: .word SIG1
+setup_sig2: .word SIG2
+
+# After this point, there is some free space which is used by the video mode
+# handling code to store the temporary mode table (not used by the kernel).
+
+modelist:
+
+.text
+endtext:
+.data
+enddata:
+.bss
+endbss:
--- /dev/null
+/*
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1997 Martin Mares
+ */
+
+/*
+ * This file builds a disk-image from three different files:
+ *
+ * - bootsect: Compatibility mbr which prints an error message if
+ * someone tries to boot the kernel directly.
+ * - setup: 8086 machine code, sets up system parm
+ * - vmlwk.bin: The "piggy" LWK kernel image. The first part of the
+ * image is the decompression code (compressed/head.o),
+ * which begins executing at startup_32. startup_32 then
+ * uncompresses the real kernel image that follows it.
+ *
+ * It does some checking that all files are of the correct type, and
+ * just writes the result to stdout, removing headers and padding to
+ * the right amount. It also writes some system data to stderr.
+ */
+
+/*
+ * Changes by tytso to allow root device specification
+ * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
+ * Cross compiling fixes by Gertjan van Wingerde, July 1996
+ * Rewritten by Martin Mares, April 1997
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/sysmacros.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <asm/boot.h>
+
+typedef unsigned char byte;
+typedef unsigned short word;
+typedef unsigned long u32;
+
+#define DEFAULT_MAJOR_ROOT 0
+#define DEFAULT_MINOR_ROOT 0
+
+/* Minimal number of setup sectors (see also bootsect.S) */
+#define SETUP_SECTS 4
+
+byte buf[1024];
+int fd;
+int is_big_kernel;
+
+void die(const char * str, ...)
+{
+ va_list args;
+ va_start(args, str);
+ vfprintf(stderr, str, args);
+ fputc('\n', stderr);
+ exit(1);
+}
+
+void file_open(const char *name)
+{
+ if ((fd = open(name, O_RDONLY, 0)) < 0)
+ die("Unable to open `%s': %m", name);
+}
+
+void usage(void)
+{
+ die("Usage: build [-b] bootsect setup system [rootdev] [> image]");
+}
+
+int main(int argc, char ** argv)
+{
+ unsigned int i, c, sz, setup_sectors;
+ u32 sys_size;
+ byte major_root, minor_root;
+ struct stat sb;
+
+ if (argc > 2 && !strcmp(argv[1], "-b"))
+ {
+ is_big_kernel = 1;
+ argc--, argv++;
+ }
+ if ((argc < 4) || (argc > 5))
+ usage();
+ if (argc > 4) {
+ if (!strcmp(argv[4], "CURRENT")) {
+ if (stat("/", &sb)) {
+ perror("/");
+ die("Couldn't stat /");
+ }
+ major_root = major(sb.st_dev);
+ minor_root = minor(sb.st_dev);
+ } else if (strcmp(argv[4], "FLOPPY")) {
+ if (stat(argv[4], &sb)) {
+ perror(argv[4]);
+ die("Couldn't stat root device.");
+ }
+ major_root = major(sb.st_rdev);
+ minor_root = minor(sb.st_rdev);
+ } else {
+ major_root = 0;
+ minor_root = 0;
+ }
+ } else {
+ major_root = DEFAULT_MAJOR_ROOT;
+ minor_root = DEFAULT_MINOR_ROOT;
+ }
+ fprintf(stderr, "Root device is (%d, %d)\n", major_root, minor_root);
+
+ file_open(argv[1]);
+ i = read(fd, buf, sizeof(buf));
+ fprintf(stderr,"Boot sector %d bytes.\n",i);
+ if (i != 512)
+ die("Boot block must be exactly 512 bytes");
+ if (buf[510] != 0x55 || buf[511] != 0xaa)
+ die("Boot block hasn't got boot flag (0xAA55)");
+ buf[508] = minor_root;
+ buf[509] = major_root;
+ if (write(1, buf, 512) != 512)
+ die("Write call failed");
+ close (fd);
+
+ file_open(argv[2]); /* Copy the setup code */
+ for (i=0 ; (c=read(fd, buf, sizeof(buf)))>0 ; i+=c )
+ if (write(1, buf, c) != c)
+ die("Write call failed");
+ if (c != 0)
+ die("read-error on `setup'");
+ close (fd);
+
+ setup_sectors = (i + 511) / 512; /* Pad unused space with zeros */
+ /* for compatibility with ancient versions of LILO. */
+ if (setup_sectors < SETUP_SECTS)
+ setup_sectors = SETUP_SECTS;
+ fprintf(stderr, "Setup is %d bytes.\n", i);
+ memset(buf, 0, sizeof(buf));
+ while (i < setup_sectors * 512) {
+ c = setup_sectors * 512 - i;
+ if (c > sizeof(buf))
+ c = sizeof(buf);
+ if (write(1, buf, c) != c)
+ die("Write call failed");
+ i += c;
+ }
+
+ file_open(argv[3]);
+ if (fstat (fd, &sb))
+ die("Unable to stat `%s': %m", argv[3]);
+ sz = sb.st_size;
+ fprintf (stderr, "System is %d kB\n", sz/1024);
+ sys_size = (sz + 15) / 16;
+ /* 0x40000*16 = 4.0 MB, reasonable estimate for the current maximum */
+ if (sys_size > (is_big_kernel ? 0x40000 : DEF_SYSSIZE))
+ die("System is too big. Try using %smodules.",
+ is_big_kernel ? "" : "bzImage or ");
+ while (sz > 0) {
+ int l, n;
+
+ l = (sz > sizeof(buf)) ? sizeof(buf) : sz;
+ if ((n=read(fd, buf, l)) != l) {
+ if (n < 0)
+ die("Error reading %s: %m", argv[3]);
+ else
+ die("%s: Unexpected EOF", argv[3]);
+ }
+ if (write(1, buf, l) != l)
+ die("Write failed");
+ sz -= l;
+ }
+ close(fd);
+
+ if (lseek(1, 497, SEEK_SET) != 497) /* Write sizes to the bootsector */
+ die("Output: seek failed");
+ buf[0] = setup_sectors;
+ if (write(1, buf, 1) != 1)
+ die("Write of setup sector count failed");
+ if (lseek(1, 500, SEEK_SET) != 500)
+ die("Output: seek failed");
+ buf[0] = (sys_size & 0xff);
+ buf[1] = ((sys_size >> 8) & 0xff);
+ buf[2] = ((sys_size >> 16) & 0xff);
+ buf[3] = ((sys_size >> 24) & 0xff);
+ if (write(1, buf, 4) != 4)
+ die("Write of image length failed");
+
+ return 0; /* Everything is OK */
+}
--- /dev/null
+/* video.S
+ *
+ * Display adapter & video mode setup, version 2.13 (14-May-99)
+ *
+ * Copyright (C) 1995 -- 1998 Martin Mares <mj@ucw.cz>
+ * Based on the original setup.S code (C) Linus Torvalds and Mats Anderson
+ *
+ * Rewritten to use GNU 'as' by Chris Noe <stiker@northlink.com> May 1999
+ *
+ * For further information, look at Documentation/svga.txt.
+ *
+ */
+
+/* Enable autodetection of SVGA adapters and modes. */
+#undef CONFIG_VIDEO_SVGA
+
+/* Enable autodetection of VESA modes */
+#define CONFIG_VIDEO_VESA
+
+/* Enable compacting of mode table */
+#define CONFIG_VIDEO_COMPACT
+
+/* Retain screen contents when switching modes */
+#define CONFIG_VIDEO_RETAIN
+
+/* Enable local mode list */
+#undef CONFIG_VIDEO_LOCAL
+
+/* Force 400 scan lines for standard modes (hack to fix bad BIOS behaviour) */
+#undef CONFIG_VIDEO_400_HACK
+
+/* Hack that lets you force specific BIOS mode ID and specific dimensions */
+#undef CONFIG_VIDEO_GFX_HACK
+#define VIDEO_GFX_BIOS_AX 0x4f02 /* 800x600 on ThinkPad */
+#define VIDEO_GFX_BIOS_BX 0x0102
+#define VIDEO_GFX_DUMMY_RESOLUTION 0x6425 /* 100x37 */
+
+/* This code uses an extended set of video mode numbers. These include:
+ * Aliases for standard modes
+ * NORMAL_VGA (-1)
+ * EXTENDED_VGA (-2)
+ * ASK_VGA (-3)
+ * Video modes numbered by menu position -- NOT RECOMMENDED because of lack
+ * of compatibility when extending the table. These are between 0x00 and 0xff.
+ */
+#define VIDEO_FIRST_MENU 0x0000
+
+/* Standard BIOS video modes (BIOS number + 0x0100) */
+#define VIDEO_FIRST_BIOS 0x0100
+
+/* VESA BIOS video modes (VESA number + 0x0200) */
+#define VIDEO_FIRST_VESA 0x0200
+
+/* Video7 special modes (BIOS number + 0x0900) */
+#define VIDEO_FIRST_V7 0x0900
+
+/* Special video modes */
+#define VIDEO_FIRST_SPECIAL 0x0f00
+#define VIDEO_80x25 0x0f00
+#define VIDEO_8POINT 0x0f01
+#define VIDEO_80x43 0x0f02
+#define VIDEO_80x28 0x0f03
+#define VIDEO_CURRENT_MODE 0x0f04
+#define VIDEO_80x30 0x0f05
+#define VIDEO_80x34 0x0f06
+#define VIDEO_80x60 0x0f07
+#define VIDEO_GFX_HACK 0x0f08
+#define VIDEO_LAST_SPECIAL 0x0f09
+
+/* Video modes given by resolution */
+#define VIDEO_FIRST_RESOLUTION 0x1000
+
+/* The "recalculate timings" flag */
+#define VIDEO_RECALC 0x8000
+
+/* Positions of various video parameters passed to the kernel */
+/* (see also include/linux/tty.h) */
+#define PARAM_CURSOR_POS 0x00
+#define PARAM_VIDEO_PAGE 0x04
+#define PARAM_VIDEO_MODE 0x06
+#define PARAM_VIDEO_COLS 0x07
+#define PARAM_VIDEO_EGA_BX 0x0a
+#define PARAM_VIDEO_LINES 0x0e
+#define PARAM_HAVE_VGA 0x0f
+#define PARAM_FONT_POINTS 0x10
+
+#define PARAM_LFB_WIDTH 0x12
+#define PARAM_LFB_HEIGHT 0x14
+#define PARAM_LFB_DEPTH 0x16
+#define PARAM_LFB_BASE 0x18
+#define PARAM_LFB_SIZE 0x1c
+#define PARAM_LFB_LINELENGTH 0x24
+#define PARAM_LFB_COLORS 0x26
+#define PARAM_VESAPM_SEG 0x2e
+#define PARAM_VESAPM_OFF 0x30
+#define PARAM_LFB_PAGES 0x32
+#define PARAM_VESA_ATTRIB 0x34
+#define PARAM_CAPABILITIES 0x36
+
+/* Define DO_STORE according to CONFIG_VIDEO_RETAIN */
+#ifdef CONFIG_VIDEO_RETAIN
+#define DO_STORE call store_screen
+#else
+#define DO_STORE
+#endif /* CONFIG_VIDEO_RETAIN */
+
+# This is the main entry point called by setup.S
+# %ds *must* be pointing to the bootsector
+video: pushw %ds # We use different segments
+ pushw %ds # FS contains original DS
+ popw %fs
+ pushw %cs # DS is equal to CS
+ popw %ds
+ pushw %cs # ES is equal to CS
+ popw %es
+ xorw %ax, %ax
+ movw %ax, %gs # GS is zero
+ cld
+ call basic_detect # Basic adapter type testing (EGA/VGA/MDA/CGA)
+#ifdef CONFIG_VIDEO_SELECT
+ movw %fs:(0x01fa), %ax # User selected video mode
+ cmpw $ASK_VGA, %ax # Bring up the menu
+ jz vid2
+
+ call mode_set # Set the mode
+ jc vid1
+
+ leaw badmdt, %si # Invalid mode ID
+ call prtstr
+vid2: call mode_menu
+vid1:
+#ifdef CONFIG_VIDEO_RETAIN
+ call restore_screen # Restore screen contents
+#endif /* CONFIG_VIDEO_RETAIN */
+ call store_edid
+#endif /* CONFIG_VIDEO_SELECT */
+ call mode_params # Store mode parameters
+ popw %ds # Restore original DS
+ ret
+
+# Detect if we have CGA, MDA, EGA or VGA and pass it to the kernel.
+basic_detect:
+ movb $0, %fs:(PARAM_HAVE_VGA)
+ movb $0x12, %ah # Check EGA/VGA
+ movb $0x10, %bl
+ int $0x10
+ movw %bx, %fs:(PARAM_VIDEO_EGA_BX) # Identifies EGA to the kernel
+ cmpb $0x10, %bl # No, it's a CGA/MDA/HGA card.
+ je basret
+
+ incb adapter
+ movw $0x1a00, %ax # Check EGA or VGA?
+ int $0x10
+ cmpb $0x1a, %al # 1a means VGA...
+ jne basret # anything else is EGA.
+
+ incb %fs:(PARAM_HAVE_VGA) # We've detected a VGA
+ incb adapter
+basret: ret
+
+# Store the video mode parameters for later usage by the kernel.
+# This is done by asking the BIOS except for the rows/columns
+# parameters in the default 80x25 mode -- these are set directly,
+# because some very obscure BIOSes supply insane values.
+mode_params:
+#ifdef CONFIG_VIDEO_SELECT
+ cmpb $0, graphic_mode
+ jnz mopar_gr
+#endif
+ movb $0x03, %ah # Read cursor position
+ xorb %bh, %bh
+ int $0x10
+ movw %dx, %fs:(PARAM_CURSOR_POS)
+ movb $0x0f, %ah # Read page/mode/width
+ int $0x10
+ movw %bx, %fs:(PARAM_VIDEO_PAGE)
+ movw %ax, %fs:(PARAM_VIDEO_MODE) # Video mode and screen width
+ cmpb $0x7, %al # MDA/HGA => segment differs
+ jnz mopar0
+
+ movw $0xb000, video_segment
+mopar0: movw %gs:(0x485), %ax # Font size
+ movw %ax, %fs:(PARAM_FONT_POINTS) # (valid only on EGA/VGA)
+ movw force_size, %ax # Forced size?
+ orw %ax, %ax
+ jz mopar1
+
+ movb %ah, %fs:(PARAM_VIDEO_COLS)
+ movb %al, %fs:(PARAM_VIDEO_LINES)
+ ret
+
+mopar1: movb $25, %al
+ cmpb $0, adapter # If we are on CGA/MDA/HGA, the
+ jz mopar2 # screen must have 25 lines.
+
+ movb %gs:(0x484), %al # On EGA/VGA, use the EGA+ BIOS
+ incb %al # location of max lines.
+mopar2: movb %al, %fs:(PARAM_VIDEO_LINES)
+ ret
+
+#ifdef CONFIG_VIDEO_SELECT
+# Fetching of VESA frame buffer parameters
+mopar_gr:
+ leaw modelist+1024, %di
+ movb $0x23, %fs:(PARAM_HAVE_VGA)
+ movw 16(%di), %ax
+ movw %ax, %fs:(PARAM_LFB_LINELENGTH)
+ movw 18(%di), %ax
+ movw %ax, %fs:(PARAM_LFB_WIDTH)
+ movw 20(%di), %ax
+ movw %ax, %fs:(PARAM_LFB_HEIGHT)
+ movb 25(%di), %al
+ movb $0, %ah
+ movw %ax, %fs:(PARAM_LFB_DEPTH)
+ movb 29(%di), %al
+ movb $0, %ah
+ movw %ax, %fs:(PARAM_LFB_PAGES)
+ movl 40(%di), %eax
+ movl %eax, %fs:(PARAM_LFB_BASE)
+ movl 31(%di), %eax
+ movl %eax, %fs:(PARAM_LFB_COLORS)
+ movl 35(%di), %eax
+ movl %eax, %fs:(PARAM_LFB_COLORS+4)
+ movw 0(%di), %ax
+ movw %ax, %fs:(PARAM_VESA_ATTRIB)
+
+# get video mem size
+ leaw modelist+1024, %di
+ movw $0x4f00, %ax
+ int $0x10
+ xorl %eax, %eax
+ movw 18(%di), %ax
+ movl %eax, %fs:(PARAM_LFB_SIZE)
+
+# store mode capabilities
+ movl 10(%di), %eax
+ movl %eax, %fs:(PARAM_CAPABILITIES)
+
+# switching the DAC to 8-bit is for <= 8 bpp only
+ movw %fs:(PARAM_LFB_DEPTH), %ax
+ cmpw $8, %ax
+ jg dac_done
+
+# get DAC switching capability
+ xorl %eax, %eax
+ movb 10(%di), %al
+ testb $1, %al
+ jz dac_set
+
+# attempt to switch DAC to 8-bit
+ movw $0x4f08, %ax
+ movw $0x0800, %bx
+ int $0x10
+ cmpw $0x004f, %ax
+ jne dac_set
+ movb %bh, dac_size # store actual DAC size
+
+dac_set:
+# set color size to DAC size
+ movb dac_size, %al
+ movb %al, %fs:(PARAM_LFB_COLORS+0)
+ movb %al, %fs:(PARAM_LFB_COLORS+2)
+ movb %al, %fs:(PARAM_LFB_COLORS+4)
+ movb %al, %fs:(PARAM_LFB_COLORS+6)
+
+# set color offsets to 0
+ movb $0, %fs:(PARAM_LFB_COLORS+1)
+ movb $0, %fs:(PARAM_LFB_COLORS+3)
+ movb $0, %fs:(PARAM_LFB_COLORS+5)
+ movb $0, %fs:(PARAM_LFB_COLORS+7)
+
+dac_done:
+# get protected mode interface information
+ movw $0x4f0a, %ax
+ xorw %bx, %bx
+ xorw %di, %di
+ int $0x10
+ cmp $0x004f, %ax
+ jnz no_pm
+
+ movw %es, %fs:(PARAM_VESAPM_SEG)
+ movw %di, %fs:(PARAM_VESAPM_OFF)
+no_pm: ret
+
+# The video mode menu
+mode_menu:
+ leaw keymsg, %si # "Return/Space/Timeout" message
+ call prtstr
+ call flush
+nokey: call getkt
+
+ cmpb $0x0d, %al # ENTER ?
+ je listm # yes - manual mode selection
+
+ cmpb $0x20, %al # SPACE ?
+ je defmd1 # no - repeat
+
+ call beep
+ jmp nokey
+
+defmd1: ret # No mode chosen? Default 80x25
+
+listm: call mode_table # List mode table
+listm0: leaw name_bann, %si # Print adapter name
+ call prtstr
+ movw card_name, %si
+ orw %si, %si
+ jnz an2
+
+ movb adapter, %al
+ leaw old_name, %si
+ orb %al, %al
+ jz an1
+
+ leaw ega_name, %si
+ decb %al
+ jz an1
+
+ leaw vga_name, %si
+ jmp an1
+
+an2: call prtstr
+ leaw svga_name, %si
+an1: call prtstr
+ leaw listhdr, %si # Table header
+ call prtstr
+ movb $0x30, %dl # DL holds mode number
+ leaw modelist, %si
+lm1: cmpw $ASK_VGA, (%si) # End?
+ jz lm2
+
+ movb %dl, %al # Menu selection number
+ call prtchr
+ call prtsp2
+ lodsw
+ call prthw # Mode ID
+ call prtsp2
+ movb 0x1(%si), %al
+ call prtdec # Rows
+ movb $0x78, %al # the letter 'x'
+ call prtchr
+ lodsw
+ call prtdec # Columns
+ movb $0x0d, %al # New line
+ call prtchr
+ movb $0x0a, %al
+ call prtchr
+ incb %dl # Next character
+ cmpb $0x3a, %dl
+ jnz lm1
+
+ movb $0x61, %dl
+ jmp lm1
+
+lm2: leaw prompt, %si # Mode prompt
+ call prtstr
+ leaw edit_buf, %di # Editor buffer
+lm3: call getkey
+ cmpb $0x0d, %al # Enter?
+ jz lment
+
+ cmpb $0x08, %al # Backspace?
+ jz lmbs
+
+ cmpb $0x20, %al # Printable?
+ jc lm3
+
+ cmpw $edit_buf+4, %di # Enough space?
+ jz lm3
+
+ stosb
+ call prtchr
+ jmp lm3
+
+lmbs: cmpw $edit_buf, %di # Backspace
+ jz lm3
+
+ decw %di
+ movb $0x08, %al
+ call prtchr
+ call prtspc
+ movb $0x08, %al
+ call prtchr
+ jmp lm3
+
+lment: movb $0, (%di)
+ leaw crlft, %si
+ call prtstr
+ leaw edit_buf, %si
+ cmpb $0, (%si) # Empty string = default mode
+ jz lmdef
+
+ cmpb $0, 1(%si) # One character = menu selection
+ jz mnusel
+
+ cmpw $0x6373, (%si) # "scan" => mode scanning
+ jnz lmhx
+
+ cmpw $0x6e61, 2(%si)
+ jz lmscan
+
+lmhx: xorw %bx, %bx # Else => mode ID in hex
+lmhex: lodsb
+ orb %al, %al
+ jz lmuse1
+
+ subb $0x30, %al
+ jc lmbad
+
+ cmpb $10, %al
+ jc lmhx1
+
+ subb $7, %al
+ andb $0xdf, %al
+ cmpb $10, %al
+ jc lmbad
+
+ cmpb $16, %al
+ jnc lmbad
+
+lmhx1: shlw $4, %bx
+ orb %al, %bl
+ jmp lmhex
+
+lmuse1: movw %bx, %ax
+ jmp lmuse
+
+mnusel: lodsb # Menu selection
+ xorb %ah, %ah
+ subb $0x30, %al
+ jc lmbad
+
+ cmpb $10, %al
+ jc lmuse
+
+ cmpb $0x61-0x30, %al
+ jc lmbad
+
+ subb $0x61-0x30-10, %al
+ cmpb $36, %al
+ jnc lmbad
+
+lmuse: call mode_set
+ jc lmdef
+
+lmbad: leaw unknt, %si
+ call prtstr
+ jmp lm2
+lmscan: cmpb $0, adapter # Scanning only on EGA/VGA
+ jz lmbad
+
+ movw $0, mt_end # Scanning of modes is
+ movb $1, scanning # done as new autodetection.
+ call mode_table
+ jmp listm0
+lmdef: ret
+
+# Additional parts of mode_set... (relative jumps, you know)
+setv7: # Video7 extended modes
+ DO_STORE
+ subb $VIDEO_FIRST_V7>>8, %bh
+ movw $0x6f05, %ax
+ int $0x10
+ stc
+ ret
+
+_setrec: jmp setrec # Ugly...
+_set_80x25: jmp set_80x25
+
+# Aliases for backward compatibility.
+setalias:
+ movw $VIDEO_80x25, %ax
+ incw %bx
+ jz mode_set
+
+ movb $VIDEO_8POINT-VIDEO_FIRST_SPECIAL, %al
+ incw %bx
+ jnz setbad # Fall-through!
+
+# Setting of user mode (AX=mode ID) => CF=success
+mode_set:
+ movw %ax, %fs:(0x01fa) # Store mode for use in acpi_wakeup.S
+ movw %ax, %bx
+ cmpb $0xff, %ah
+ jz setalias
+
+ testb $VIDEO_RECALC>>8, %ah
+ jnz _setrec
+
+ cmpb $VIDEO_FIRST_RESOLUTION>>8, %ah
+ jnc setres
+
+ cmpb $VIDEO_FIRST_SPECIAL>>8, %ah
+ jz setspc
+
+ cmpb $VIDEO_FIRST_V7>>8, %ah
+ jz setv7
+
+ cmpb $VIDEO_FIRST_VESA>>8, %ah
+ jnc check_vesa
+
+ orb %ah, %ah
+ jz setmenu
+
+ decb %ah
+ jz setbios
+
+setbad: clc
+ movb $0, do_restore # The screen needn't be restored
+ ret
+
+setvesa:
+ DO_STORE
+ subb $VIDEO_FIRST_VESA>>8, %bh
+ movw $0x4f02, %ax # VESA BIOS mode set call
+ int $0x10
+ cmpw $0x004f, %ax # AL=4f if implemented
+ jnz setbad # AH=0 if OK
+
+ stc
+ ret
+
+setbios:
+ DO_STORE
+ int $0x10 # Standard BIOS mode set call
+ pushw %bx
+ movb $0x0f, %ah # Check if really set
+ int $0x10
+ popw %bx
+ cmpb %bl, %al
+ jnz setbad
+
+ stc
+ ret
+
+setspc: xorb %bh, %bh # Set special mode
+ cmpb $VIDEO_LAST_SPECIAL-VIDEO_FIRST_SPECIAL, %bl
+ jnc setbad
+
+ addw %bx, %bx
+ jmp *spec_inits(%bx)
+
+setmenu:
+ orb %al, %al # 80x25 is an exception
+ jz _set_80x25
+
+ pushw %bx # Set mode chosen from menu
+ call mode_table # Build the mode table
+ popw %ax
+ shlw $2, %ax
+ addw %ax, %si
+ cmpw %di, %si
+ jnc setbad
+
+ movw (%si), %ax # Fetch mode ID
+_m_s: jmp mode_set
+
+setres: pushw %bx # Set mode chosen by resolution
+ call mode_table
+ popw %bx
+ xchgb %bl, %bh
+setr1: lodsw
+ cmpw $ASK_VGA, %ax # End of the list?
+ jz setbad
+
+ lodsw
+ cmpw %bx, %ax
+ jnz setr1
+
+ movw -4(%si), %ax # Fetch mode ID
+ jmp _m_s
+
+check_vesa:
+ leaw modelist+1024, %di
+ subb $VIDEO_FIRST_VESA>>8, %bh
+ movw %bx, %cx # Get mode information structure
+ movw $0x4f01, %ax
+ int $0x10
+ addb $VIDEO_FIRST_VESA>>8, %bh
+ cmpw $0x004f, %ax
+ jnz setbad
+
+ movb (%di), %al # Check capabilities.
+ andb $0x19, %al
+ cmpb $0x09, %al
+ jz setvesa # This is a text mode
+
+ movb (%di), %al # Check capabilities.
+ andb $0x99, %al
+ cmpb $0x99, %al
+ jnz _setbad # Doh! No linear frame buffer.
+
+ subb $VIDEO_FIRST_VESA>>8, %bh
+ orw $0x4000, %bx # Use linear frame buffer
+ movw $0x4f02, %ax # VESA BIOS mode set call
+ int $0x10
+ cmpw $0x004f, %ax # AL=4f if implemented
+ jnz _setbad # AH=0 if OK
+
+ movb $1, graphic_mode # flag graphic mode
+ movb $0, do_restore # no screen restore
+ stc
+ ret
+
+_setbad: jmp setbad # Ugly...
+
+# Recalculate vertical display end registers -- this fixes various
+# inconsistencies of extended modes on many adapters. Called when
+# the VIDEO_RECALC flag is set in the mode ID.
+
+setrec: subb $VIDEO_RECALC>>8, %ah # Set the base mode
+ call mode_set
+ jnc rct3
+
+ movw %gs:(0x485), %ax # Font size in pixels
+ movb %gs:(0x484), %bl # Number of rows
+ incb %bl
+ mulb %bl # Number of visible
+ decw %ax # scan lines - 1
+ movw $0x3d4, %dx
+ movw %ax, %bx
+ movb $0x12, %al # Lower 8 bits
+ movb %bl, %ah
+ outw %ax, %dx
+ movb $0x07, %al # Bits 8 and 9 in the overflow register
+ call inidx
+ xchgb %al, %ah
+ andb $0xbd, %ah
+ shrb %bh
+ jnc rct1
+ orb $0x02, %ah
+rct1: shrb %bh
+ jnc rct2
+ orb $0x40, %ah
+rct2: movb $0x07, %al
+ outw %ax, %dx
+ stc
+rct3: ret
+
+# Table of routines for setting of the special modes.
+spec_inits:
+ .word set_80x25
+ .word set_8pixel
+ .word set_80x43
+ .word set_80x28
+ .word set_current
+ .word set_80x30
+ .word set_80x34
+ .word set_80x60
+ .word set_gfx
+
+# Set the 80x25 mode. If already set, do nothing.
+set_80x25:
+ movw $0x5019, force_size # Override possibly broken BIOS
+use_80x25:
+#ifdef CONFIG_VIDEO_400_HACK
+ movw $0x1202, %ax # Force 400 scan lines
+ movb $0x30, %bl
+ int $0x10
+#else
+ movb $0x0f, %ah # Get current mode ID
+ int $0x10
+ cmpw $0x5007, %ax # Mode 7 (80x25 mono) is the only one available
+ jz st80 # on CGA/MDA/HGA and is also available on EGAM
+
+ cmpw $0x5003, %ax # Unknown mode, force 80x25 color
+ jnz force3
+
+st80: cmpb $0, adapter # CGA/MDA/HGA => mode 3/7 is always 80x25
+ jz set80
+
+ movb %gs:(0x0484), %al # This is EGA+ -- beware of 80x50 etc.
+ orb %al, %al # Some buggy BIOS'es set 0 rows
+ jz set80
+
+ cmpb $24, %al # It's hopefully correct
+ jz set80
+#endif /* CONFIG_VIDEO_400_HACK */
+force3: DO_STORE
+ movw $0x0003, %ax # Forced set
+ int $0x10
+set80: stc
+ ret
+
+# Set the 80x50/80x43 8-pixel mode. Simple BIOS calls.
+set_8pixel:
+ DO_STORE
+ call use_80x25 # The base is 80x25
+set_8pt:
+ movw $0x1112, %ax # Use 8x8 font
+ xorb %bl, %bl
+ int $0x10
+ movw $0x1200, %ax # Use alternate print screen
+ movb $0x20, %bl
+ int $0x10
+ movw $0x1201, %ax # Turn off cursor emulation
+ movb $0x34, %bl
+ int $0x10
+ movb $0x01, %ah # Define cursor scan lines 6-7
+ movw $0x0607, %cx
+ int $0x10
+set_current:
+ stc
+ ret
+
+# Set the 80x28 mode. This mode works on all VGA's, because it's a standard
+# 80x25 mode with 14-point fonts instead of 16-point.
+set_80x28:
+ DO_STORE
+ call use_80x25 # The base is 80x25
+set14: movw $0x1111, %ax # Use 9x14 font
+ xorb %bl, %bl
+ int $0x10
+ movb $0x01, %ah # Define cursor scan lines 11-12
+ movw $0x0b0c, %cx
+ int $0x10
+ stc
+ ret
+
+# Set the 80x43 mode. This mode works on all VGA's.
+# It's a 350-scanline mode with 8-pixel font.
+set_80x43:
+ DO_STORE
+ movw $0x1201, %ax # Set 350 scans
+ movb $0x30, %bl
+ int $0x10
+ movw $0x0003, %ax # Reset video mode
+ int $0x10
+ jmp set_8pt # Use 8-pixel font
+
+# Set the 80x30 mode (all VGA's). 480 scanlines, 16-pixel font.
+set_80x30:
+ call use_80x25 # Start with real 80x25
+ DO_STORE
+ movw $0x3cc, %dx # Get CRTC port
+ inb %dx, %al
+ movb $0xd4, %dl
+ rorb %al # Mono or color?
+ jc set48a
+
+ movb $0xb4, %dl
+set48a: movw $0x0c11, %ax # Vertical sync end (also unlocks CR0-7)
+ call outidx
+ movw $0x0b06, %ax # Vertical total
+ call outidx
+ movw $0x3e07, %ax # (Vertical) overflow
+ call outidx
+ movw $0xea10, %ax # Vertical sync start
+ call outidx
+ movw $0xdf12, %ax # Vertical display end
+ call outidx
+ movw $0xe715, %ax # Vertical blank start
+ call outidx
+ movw $0x0416, %ax # Vertical blank end
+ call outidx
+ pushw %dx
+ movb $0xcc, %dl # Misc output register (read)
+ inb %dx, %al
+ movb $0xc2, %dl # (write)
+ andb $0x0d, %al # Preserve clock select bits and color bit
+ orb $0xe2, %al # Set correct sync polarity
+ outb %al, %dx
+ popw %dx
+ movw $0x501e, force_size
+ stc # That's all.
+ ret
+
+# Set the 80x34 mode (all VGA's). 480 scans, 14-pixel font.
+set_80x34:
+ call set_80x30 # Set 480 scans
+ call set14 # And 14-pt font
+ movw $0xdb12, %ax # VGA vertical display end
+ movw $0x5022, force_size
+setvde: call outidx
+ stc
+ ret
+
+# Set the 80x60 mode (all VGA's). 480 scans, 8-pixel font.
+set_80x60:
+ call set_80x30 # Set 480 scans
+ call set_8pt # And 8-pt font
+ movw $0xdf12, %ax # VGA vertical display end
+ movw $0x503c, force_size
+ jmp setvde
+
+# Special hack for ThinkPad graphics
+set_gfx:
+#ifdef CONFIG_VIDEO_GFX_HACK
+ movw $VIDEO_GFX_BIOS_AX, %ax
+ movw $VIDEO_GFX_BIOS_BX, %bx
+ int $0x10
+ movw $VIDEO_GFX_DUMMY_RESOLUTION, force_size
+ stc
+#endif
+ ret
+
+#ifdef CONFIG_VIDEO_RETAIN
+
+# Store screen contents to temporary buffer.
+store_screen:
+ cmpb $0, do_restore # Already stored?
+ jnz stsr
+
+ testb $CAN_USE_HEAP, loadflags # Have we space for storing?
+ jz stsr
+
+ pushw %ax
+ pushw %bx
+ pushw force_size # Don't force specific size
+ movw $0, force_size
+ call mode_params # Obtain params of current mode
+ popw force_size
+ movb %fs:(PARAM_VIDEO_LINES), %ah
+ movb %fs:(PARAM_VIDEO_COLS), %al
+ movw %ax, %bx # BX=dimensions
+ mulb %ah
+ movw %ax, %cx # CX=number of characters
+ addw %ax, %ax # Calculate image size
+ addw $modelist+1024+4, %ax
+ cmpw heap_end_ptr, %ax
+ jnc sts1 # Unfortunately, out of memory
+
+ movw %fs:(PARAM_CURSOR_POS), %ax # Store mode params
+ leaw modelist+1024, %di
+ stosw
+ movw %bx, %ax
+ stosw
+ pushw %ds # Store the screen
+ movw video_segment, %ds
+ xorw %si, %si
+ rep
+ movsw
+ popw %ds
+ incb do_restore # Screen will be restored later
+sts1: popw %bx
+ popw %ax
+stsr: ret
+
+# Restore screen contents from temporary buffer.
+restore_screen:
+ cmpb $0, do_restore # Has the screen been stored?
+ jz res1
+
+ call mode_params # Get parameters of current mode
+ movb %fs:(PARAM_VIDEO_LINES), %cl
+ movb %fs:(PARAM_VIDEO_COLS), %ch
+ leaw modelist+1024, %si # Screen buffer
+ lodsw # Set cursor position
+ movw %ax, %dx
+ cmpb %cl, %dh
+ jc res2
+
+ movb %cl, %dh
+ decb %dh
+res2: cmpb %ch, %dl
+ jc res3
+
+ movb %ch, %dl
+ decb %dl
+res3: movb $0x02, %ah
+ movb $0x00, %bh
+ int $0x10
+ lodsw # Display size
+ movb %ah, %dl # DL=number of lines
+ movb $0, %ah # BX=phys. length of orig. line
+ movw %ax, %bx
+ cmpb %cl, %dl # Too many?
+ jc res4
+
+ pushw %ax
+ movb %dl, %al
+ subb %cl, %al
+ mulb %bl
+ addw %ax, %si
+ addw %ax, %si
+ popw %ax
+ movb %cl, %dl
+res4: cmpb %ch, %al # Too wide?
+ jc res5
+
+ movb %ch, %al # AX=width of src. line
+res5: movb $0, %cl
+ xchgb %ch, %cl
+ movw %cx, %bp # BP=width of dest. line
+ pushw %es
+ movw video_segment, %es
+ xorw %di, %di # Move the data
+ addw %bx, %bx # Convert BX and BP to _bytes_
+ addw %bp, %bp
+res6: pushw %si
+ pushw %di
+ movw %ax, %cx
+ rep
+ movsw
+ popw %di
+ popw %si
+ addw %bp, %di
+ addw %bx, %si
+ decb %dl
+ jnz res6
+
+ popw %es # Done
+res1: ret
+#endif /* CONFIG_VIDEO_RETAIN */
+
+# Write to indexed VGA register (AL=index, AH=data, DX=index reg. port)
+outidx: outb %al, %dx
+ pushw %ax
+ movb %ah, %al
+ incw %dx
+ outb %al, %dx
+ decw %dx
+ popw %ax
+ ret
+
+# Build the table of video modes (stored after the setup.S code at the
+# `modelist' label). Each video mode record looks like:
+# .word MODE-ID (our special mode ID (see above))
+# .byte rows (number of rows)
+# .byte columns (number of columns)
+# Returns the address of the end of the table in DI; the end is marked
+# with an ASK_VGA ID.
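+#
+# For example, a hypothetical 80x50 record (equivalent to the packed
+# ".word 0x5032" form used in the tables below) would be:
+# .word VIDEO_8POINT # mode ID
+# .byte 50, 80 # 50 rows, 80 columns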
+mode_table:
+ movw mt_end, %di # Already filled?
+ orw %di, %di
+ jnz mtab1x
+
+ leaw modelist, %di # Store standard modes:
+ movl $VIDEO_80x25 + 0x50190000, %eax # The 80x25 mode (ALL)
+ stosl
+ movb adapter, %al # CGA/MDA/HGA -- no more modes
+ orb %al, %al
+ jz mtabe
+
+ decb %al
+ jnz mtabv
+
+ movl $VIDEO_8POINT + 0x502b0000, %eax # The 80x43 EGA mode
+ stosl
+ jmp mtabe
+
+mtab1x: jmp mtab1
+
+mtabv: leaw vga_modes, %si # All modes for std VGA
+ movw $vga_modes_end-vga_modes, %cx
+ rep # I'm unable to use movsw as I don't know how to store a half
+ movsb # of the expression above to cx without using explicit shr.
+
+ cmpb $0, scanning # Mode scan requested?
+ jz mscan1
+
+ call mode_scan
+mscan1:
+
+#ifdef CONFIG_VIDEO_LOCAL
+ call local_modes
+#endif /* CONFIG_VIDEO_LOCAL */
+
+#ifdef CONFIG_VIDEO_VESA
+ call vesa_modes # Detect VESA VGA modes
+#endif /* CONFIG_VIDEO_VESA */
+
+#ifdef CONFIG_VIDEO_SVGA
+ cmpb $0, scanning # Bypass when scanning
+ jnz mscan2
+
+ call svga_modes # Detect SVGA cards & modes
+mscan2:
+#endif /* CONFIG_VIDEO_SVGA */
+
+mtabe:
+
+#ifdef CONFIG_VIDEO_COMPACT
+ leaw modelist, %si
+ movw %di, %dx
+ movw %si, %di
+cmt1: cmpw %dx, %si # Scan all modes
+ jz cmt2
+
+ leaw modelist, %bx # Find in previous entries
+ movw 2(%si), %cx
+cmt3: cmpw %bx, %si
+ jz cmt4
+
+ cmpw 2(%bx), %cx # Found => don't copy this entry
+ jz cmt5
+
+ addw $4, %bx
+ jmp cmt3
+
+cmt4: movsl # Copy entry
+ jmp cmt1
+
+cmt5: addw $4, %si # Skip entry
+ jmp cmt1
+
+cmt2:
+#endif /* CONFIG_VIDEO_COMPACT */
+
+ movw $ASK_VGA, (%di) # End marker
+ movw %di, mt_end
+mtab1: leaw modelist, %si # SI=mode list, DI=list end
+ret0: ret
+
+# Modes usable on all standard VGAs
+vga_modes:
+ .word VIDEO_8POINT
+ .word 0x5032 # 80x50
+ .word VIDEO_80x43
+ .word 0x502b # 80x43
+ .word VIDEO_80x28
+ .word 0x501c # 80x28
+ .word VIDEO_80x30
+ .word 0x501e # 80x30
+ .word VIDEO_80x34
+ .word 0x5022 # 80x34
+ .word VIDEO_80x60
+ .word 0x503c # 80x60
+#ifdef CONFIG_VIDEO_GFX_HACK
+ .word VIDEO_GFX_HACK
+ .word VIDEO_GFX_DUMMY_RESOLUTION
+#endif
+
+vga_modes_end:
+# Detect VESA modes.
+
+#ifdef CONFIG_VIDEO_VESA
+vesa_modes:
+ cmpb $2, adapter # VGA only
+ jnz ret0
+
+ movw %di, %bp # BP=original mode table end
+ addw $0x200, %di # Buffer space
+ movw $0x4f00, %ax # VESA Get card info call
+ int $0x10
+ movw %bp, %di
+ cmpw $0x004f, %ax # Successful?
+ jnz ret0
+
+ cmpw $0x4556, 0x200(%di)
+ jnz ret0
+
+ cmpw $0x4153, 0x202(%di)
+ jnz ret0
+
+ movw $vesa_name, card_name # Set name to "VESA VGA"
+ pushw %gs
+ lgsw 0x20e(%di), %si # GS:SI=mode list
+ movw $128, %cx # Iteration limit
+vesa1:
+# gas version 2.9.1, using BFD version 2.9.1.0.23 buggers the next inst.
+# XXX: lodsw %gs:(%si), %ax # Get next mode in the list
+ gs; lodsw
+ cmpw $0xffff, %ax # End of the table?
+ jz vesar
+
+ cmpw $0x0080, %ax # Check validity of mode ID
+ jc vesa2
+
+ orb %ah, %ah # Valid IDs: 0x0000-0x007f/0x0100-0x07ff
+ jz vesan # Certain BIOSes report 0x80-0xff!
+
+ cmpw $0x0800, %ax
+ jnc vesae
+
+vesa2: pushw %cx
+ movw %ax, %cx # Get mode information structure
+ movw $0x4f01, %ax
+ int $0x10
+ movw %cx, %bx # BX=mode number
+ addb $VIDEO_FIRST_VESA>>8, %bh
+ popw %cx
+ cmpw $0x004f, %ax
+ jnz vesan # Don't report errors (buggy BIOSES)
+
+ movb (%di), %al # Check capabilities. We require
+ andb $0x19, %al # a color text mode.
+ cmpb $0x09, %al
+ jnz vesan
+
+ cmpw $0xb800, 8(%di) # Standard video memory address required
+ jnz vesan
+
+ testb $2, (%di) # Mode characteristics supplied?
+ movw %bx, (%di) # Store mode number
+ jz vesa3
+
+ xorw %dx, %dx
+ movw 0x12(%di), %bx # Width
+ orb %bh, %bh
+ jnz vesan
+
+ movb %bl, 0x3(%di)
+ movw 0x14(%di), %ax # Height
+ orb %ah, %ah
+ jnz vesan
+
+ movb %al, 2(%di)
+ mulb %bl
+ cmpw $8193, %ax # Small enough for Linux console driver?
+ jnc vesan
+
+ jmp vesaok
+
+vesa3: subw $0x8108, %bx # This mode has no detailed info specified,
+ jc vesan # so it must be a standard VESA mode.
+
+ cmpw $5, %bx
+ jnc vesan
+
+ movw vesa_text_mode_table(%bx), %ax
+ movw %ax, 2(%di)
+vesaok: addw $4, %di # The mode is valid. Store it.
+vesan: loop vesa1 # Next mode. Limit exceeded => error
+vesae: leaw vesaer, %si
+ call prtstr
+ movw %bp, %di # Discard already found modes.
+vesar: popw %gs
+ ret
+
+# Dimensions of standard VESA text modes
+vesa_text_mode_table:
+ .byte 60, 80 # 0108
+ .byte 25, 132 # 0109
+ .byte 43, 132 # 010A
+ .byte 50, 132 # 010B
+ .byte 60, 132 # 010C
+#endif /* CONFIG_VIDEO_VESA */
+
+# Scan for video modes. A bit dirty, but should work.
+mode_scan:
+ movw $0x0100, %cx # Start with mode 0
+scm1: movb $0, %ah # Test the mode
+ movb %cl, %al
+ int $0x10
+ movb $0x0f, %ah
+ int $0x10
+ cmpb %cl, %al
+ jnz scm2 # Mode not set
+
+ movw $0x3c0, %dx # Test if it's a text mode
+ movb $0x10, %al # Mode bits
+ call inidx
+ andb $0x03, %al
+ jnz scm2
+
+ movb $0xce, %dl # Another set of mode bits
+ movb $0x06, %al
+ call inidx
+ shrb %al
+ jc scm2
+
+ movb $0xd4, %dl # Cursor location
+ movb $0x0f, %al
+ call inidx
+ orb %al, %al
+ jnz scm2
+
+ movw %cx, %ax # Ok, store the mode
+ stosw
+ movb %gs:(0x484), %al # Number of rows
+ incb %al
+ stosb
+ movw %gs:(0x44a), %ax # Number of columns
+ stosb
+scm2: incb %cl
+ jns scm1
+
+ movw $0x0003, %ax # Return back to mode 3
+ int $0x10
+ ret
+
+tstidx: outw %ax, %dx # OUT DX,AX and inidx
+inidx: outb %al, %dx # Read from indexed VGA register
+ incw %dx # AL=index, DX=index reg port -> AL=data
+ inb %dx, %al
+ decw %dx
+ ret
+
+# Try to detect type of SVGA card and supply (usually approximate) video
+# mode table for it.
+
+#ifdef CONFIG_VIDEO_SVGA
+svga_modes:
+ leaw svga_table, %si # Test all known SVGA adapters
+dosvga: lodsw
+ movw %ax, %bp # Default mode table
+ orw %ax, %ax
+ jz didsv1
+
+ lodsw # Pointer to test routine
+ pushw %si
+ pushw %di
+ pushw %es
+ movw $0xc000, %bx
+ movw %bx, %es
+ call *%ax # Call test routine
+ popw %es
+ popw %di
+ popw %si
+ orw %bp, %bp
+ jz dosvga
+
+ movw %bp, %si # Found, copy the modes
+ movb svga_prefix, %ah
+cpsvga: lodsb
+ orb %al, %al
+ jz didsv
+
+ stosw
+ movsw
+ jmp cpsvga
+
+didsv: movw %si, card_name # Store pointer to card name
+didsv1: ret
+
+# Table of all known SVGA cards. For each card, we store a pointer to
+# a table of video modes supported by the card and a pointer to a routine
+# used for testing of presence of the card. The video mode table is always
+# followed by the name of the card or the chipset.
+svga_table:
+ .word ati_md, ati_test
+ .word oak_md, oak_test
+ .word paradise_md, paradise_test
+ .word realtek_md, realtek_test
+ .word s3_md, s3_test
+ .word chips_md, chips_test
+ .word video7_md, video7_test
+ .word cirrus5_md, cirrus5_test
+ .word cirrus6_md, cirrus6_test
+ .word cirrus1_md, cirrus1_test
+ .word ahead_md, ahead_test
+ .word everex_md, everex_test
+ .word genoa_md, genoa_test
+ .word trident_md, trident_test
+ .word tseng_md, tseng_test
+ .word 0
+
+# Test routines and mode tables:
+
+# S3 - The test algorithm was taken from the SuperProbe package
+# for XFree86 1.2.1. Report bugs to Christoph.Niemann@linux.org
+s3_test:
+ movw $0x0f35, %cx # we store some constants in cl/ch
+ movw $0x03d4, %dx
+ movb $0x38, %al
+ call inidx
+ movb %al, %bh # store current CRT-register 0x38
+ movw $0x0038, %ax
+ call outidx # disable writing to special regs
+ movb %cl, %al # check whether we can write special reg 0x35
+ call inidx
+ movb %al, %bl # save the current value of CRT reg 0x35
+ andb $0xf0, %al # clear bits 0-3
+ movb %al, %ah
+ movb %cl, %al # and write it to CRT reg 0x35
+ call outidx
+ call inidx # now read it back
+ andb %ch, %al # clear the upper 4 bits
+ jz s3_2 # the first test failed. But we have a
+
+ movb %bl, %ah # second chance
+ movb %cl, %al
+ call outidx
+ jmp s3_1 # do the other tests
+
+s3_2: movw %cx, %ax # load ah with 0xf and al with 0x35
+ orb %bl, %ah # set the upper 4 bits of ah with the orig value
+ call outidx # write ...
+ call inidx # ... and reread
+ andb %cl, %al # turn off the upper 4 bits
+ pushw %ax
+ movb %bl, %ah # restore old value in register 0x35
+ movb %cl, %al
+ call outidx
+ popw %ax
+ cmpb %ch, %al # setting lower 4 bits was successful => bad
+ je no_s3 # writing is allowed => this is not an S3
+
+s3_1: movw $0x4838, %ax # allow writing to special regs by putting
+ call outidx # magic number into CRT-register 0x38
+ movb %cl, %al # check whether we can write special reg 0x35
+ call inidx
+ movb %al, %bl
+ andb $0xf0, %al
+ movb %al, %ah
+ movb %cl, %al
+ call outidx
+ call inidx
+ andb %ch, %al
+ jnz no_s3 # no, we can't write => no S3
+
+ movw %cx, %ax
+ orb %bl, %ah
+ call outidx
+ call inidx
+ andb %ch, %al
+ pushw %ax
+ movb %bl, %ah # restore old value in register 0x35
+ movb %cl, %al
+ call outidx
+ popw %ax
+ cmpb %ch, %al
+ jne no_s31 # writing not possible => no S3
+ movb $0x30, %al
+ call inidx # now get the S3 id ...
+ leaw idS3, %di
+ movw $0x10, %cx
+ repne
+ scasb
+ je no_s31
+
+ movb %bh, %ah
+ movb $0x38, %al
+ jmp s3rest
+
+no_s3: movb $0x35, %al # restore CRT register 0x35
+ movb %bl, %ah
+ call outidx
+no_s31: xorw %bp, %bp # Detection failed
+s3rest: movb %bh, %ah
+ movb $0x38, %al # restore old value of CRT register 0x38
+ jmp outidx
+
+idS3: .byte 0x81, 0x82, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95
+ .byte 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa8, 0xb0
+
+s3_md: .byte 0x54, 0x2b, 0x84
+ .byte 0x55, 0x19, 0x84
+ .byte 0
+ .ascii "S3"
+ .byte 0
+
+# ATI cards.
+ati_test:
+ leaw idati, %si
+ movw $0x31, %di
+ movw $0x09, %cx
+ repe
+ cmpsb
+ je atiok
+
+ xorw %bp, %bp
+atiok: ret
+
+idati: .ascii "761295520"
+
+ati_md: .byte 0x23, 0x19, 0x84
+ .byte 0x33, 0x2c, 0x84
+ .byte 0x22, 0x1e, 0x64
+ .byte 0x21, 0x19, 0x64
+ .byte 0x58, 0x21, 0x50
+ .byte 0x5b, 0x1e, 0x50
+ .byte 0
+ .ascii "ATI"
+ .byte 0
+
+# AHEAD
+ahead_test:
+ movw $0x200f, %ax
+ movw $0x3ce, %dx
+ outw %ax, %dx
+ incw %dx
+ inb %dx, %al
+ cmpb $0x20, %al
+ je isahed
+
+ cmpb $0x21, %al
+ je isahed
+
+ xorw %bp, %bp
+isahed: ret
+
+ahead_md:
+ .byte 0x22, 0x2c, 0x84
+ .byte 0x23, 0x19, 0x84
+ .byte 0x24, 0x1c, 0x84
+ .byte 0x2f, 0x32, 0xa0
+ .byte 0x32, 0x22, 0x50
+ .byte 0x34, 0x42, 0x50
+ .byte 0
+ .ascii "Ahead"
+ .byte 0
+
+# Chips & Tech.
+chips_test:
+ movw $0x3c3, %dx
+ inb %dx, %al
+ orb $0x10, %al
+ outb %al, %dx
+ movw $0x104, %dx
+ inb %dx, %al
+ movb %al, %bl
+ movw $0x3c3, %dx
+ inb %dx, %al
+ andb $0xef, %al
+ outb %al, %dx
+ cmpb $0xa5, %bl
+ je cantok
+
+ xorw %bp, %bp
+cantok: ret
+
+chips_md:
+ .byte 0x60, 0x19, 0x84
+ .byte 0x61, 0x32, 0x84
+ .byte 0
+ .ascii "Chips & Technologies"
+ .byte 0
+
+# Cirrus Logic 5X0
+cirrus1_test:
+ movw $0x3d4, %dx
+ movb $0x0c, %al
+ outb %al, %dx
+ incw %dx
+ inb %dx, %al
+ movb %al, %bl
+ xorb %al, %al
+ outb %al, %dx
+ decw %dx
+ movb $0x1f, %al
+ outb %al, %dx
+ incw %dx
+ inb %dx, %al
+ movb %al, %bh
+ xorb %ah, %ah
+ shlb $4, %al
+ movw %ax, %cx
+ movb %bh, %al
+ shrb $4, %al
+ addw %ax, %cx
+ shlw $8, %cx
+ addw $6, %cx
+ movw %cx, %ax
+ movw $0x3c4, %dx
+ outw %ax, %dx
+ incw %dx
+ inb %dx, %al
+ andb %al, %al
+ jnz nocirr
+
+ movb %bh, %al
+ outb %al, %dx
+ inb %dx, %al
+ cmpb $0x01, %al
+ je iscirr
+
+nocirr: xorw %bp, %bp
+iscirr: movw $0x3d4, %dx
+ movb %bl, %al
+ xorb %ah, %ah
+ shlw $8, %ax
+ addw $0x0c, %ax
+ outw %ax, %dx
+ ret
+
+cirrus1_md:
+ .byte 0x1f, 0x19, 0x84
+ .byte 0x20, 0x2c, 0x84
+ .byte 0x22, 0x1e, 0x84
+ .byte 0x31, 0x25, 0x64
+ .byte 0
+ .ascii "Cirrus Logic 5X0"
+ .byte 0
+
+# Cirrus Logic 54XX
+cirrus5_test:
+ movw $0x3c4, %dx
+ movb $6, %al
+ call inidx
+ movb %al, %bl # BL=backup
+ movw $6, %ax
+ call tstidx
+ cmpb $0x0f, %al
+ jne c5fail
+
+ movw $0x1206, %ax
+ call tstidx
+ cmpb $0x12, %al
+ jne c5fail
+
+ movb $0x1e, %al
+ call inidx
+ movb %al, %bh
+ movb %bh, %ah
+ andb $0xc0, %ah
+ movb $0x1e, %al
+ call tstidx
+ andb $0x3f, %al
+ jne c5xx
+
+ movb $0x1e, %al
+ movb %bh, %ah
+ orb $0x3f, %ah
+ call tstidx
+ xorb $0x3f, %al
+ andb $0x3f, %al
+c5xx: pushf
+ movb $0x1e, %al
+ movb %bh, %ah
+ outw %ax, %dx
+ popf
+ je c5done
+
+c5fail: xorw %bp, %bp
+c5done: movb $6, %al
+ movb %bl, %ah
+ outw %ax, %dx
+ ret
+
+cirrus5_md:
+ .byte 0x14, 0x19, 0x84
+ .byte 0x54, 0x2b, 0x84
+ .byte 0
+ .ascii "Cirrus Logic 54XX"
+ .byte 0
+
+# Cirrus Logic 64XX -- no known extra modes, but must be identified, because
+# it's misidentified by the Ahead test.
+cirrus6_test:
+ movw $0x3ce, %dx
+ movb $0x0a, %al
+ call inidx
+ movb %al, %bl # BL=backup
+ movw $0xce0a, %ax
+ call tstidx
+ orb %al, %al
+ jne c2fail
+
+ movw $0xec0a, %ax
+ call tstidx
+ cmpb $0x01, %al
+ jne c2fail
+
+ movb $0xaa, %al
+ call inidx # 4X, 5X, 7X and 8X are valid 64XX chip ID's.
+ shrb $4, %al
+ subb $4, %al
+ jz c6done
+
+ decb %al
+ jz c6done
+
+ subb $2, %al
+ jz c6done
+
+ decb %al
+ jz c6done
+
+c2fail: xorw %bp, %bp
+c6done: movb $0x0a, %al
+ movb %bl, %ah
+ outw %ax, %dx
+ ret
+
+cirrus6_md:
+ .byte 0
+ .ascii "Cirrus Logic 64XX"
+ .byte 0
+
+# Everex / Trident
+everex_test:
+ movw $0x7000, %ax
+ xorw %bx, %bx
+ int $0x10
+ cmpb $0x70, %al
+ jne noevrx
+
+ shrw $4, %dx
+ cmpw $0x678, %dx
+ je evtrid
+
+ cmpw $0x236, %dx
+ jne evrxok
+
+evtrid: leaw trident_md, %bp
+evrxok: ret
+
+noevrx: xorw %bp, %bp
+ ret
+
+everex_md:
+ .byte 0x03, 0x22, 0x50
+ .byte 0x04, 0x3c, 0x50
+ .byte 0x07, 0x2b, 0x64
+ .byte 0x08, 0x4b, 0x64
+ .byte 0x0a, 0x19, 0x84
+ .byte 0x0b, 0x2c, 0x84
+ .byte 0x16, 0x1e, 0x50
+ .byte 0x18, 0x1b, 0x64
+ .byte 0x21, 0x40, 0xa0
+ .byte 0x40, 0x1e, 0x84
+ .byte 0
+ .ascii "Everex/Trident"
+ .byte 0
+
+# Genoa.
+genoa_test:
+ leaw idgenoa, %si # Check Genoa 'clues'
+ xorw %ax, %ax
+ movb %es:(0x37), %al
+ movw %ax, %di
+ movw $0x04, %cx
+ decw %si
+ decw %di
+l1: incw %si
+ incw %di
+ movb (%si), %al
+ testb %al, %al
+ jz l2
+
+ cmpb %es:(%di), %al
+l2: loope l1
+ orw %cx, %cx
+ je isgen
+
+ xorw %bp, %bp
+isgen: ret
+
+idgenoa: .byte 0x77, 0x00, 0x99, 0x66
+
+genoa_md:
+ .byte 0x58, 0x20, 0x50
+ .byte 0x5a, 0x2a, 0x64
+ .byte 0x60, 0x19, 0x84
+ .byte 0x61, 0x1d, 0x84
+ .byte 0x62, 0x20, 0x84
+ .byte 0x63, 0x2c, 0x84
+ .byte 0x64, 0x3c, 0x84
+ .byte 0x6b, 0x4f, 0x64
+ .byte 0x72, 0x3c, 0x50
+ .byte 0x74, 0x42, 0x50
+ .byte 0x78, 0x4b, 0x64
+ .byte 0
+ .ascii "Genoa"
+ .byte 0
+
+# OAK
+oak_test:
+ leaw idoakvga, %si
+ movw $0x08, %di
+ movw $0x08, %cx
+ repe
+ cmpsb
+ je isoak
+
+ xorw %bp, %bp
+isoak: ret
+
+idoakvga: .ascii "OAK VGA "
+
+oak_md: .byte 0x4e, 0x3c, 0x50
+ .byte 0x4f, 0x3c, 0x84
+ .byte 0x50, 0x19, 0x84
+ .byte 0x51, 0x2b, 0x84
+ .byte 0
+ .ascii "OAK"
+ .byte 0
+
+# WD Paradise.
+paradise_test:
+ leaw idparadise, %si
+ movw $0x7d, %di
+ movw $0x04, %cx
+ repe
+ cmpsb
+ je ispara
+
+ xorw %bp, %bp
+ispara: ret
+
+idparadise: .ascii "VGA="
+
+paradise_md:
+ .byte 0x41, 0x22, 0x50
+ .byte 0x47, 0x1c, 0x84
+ .byte 0x55, 0x19, 0x84
+ .byte 0x54, 0x2c, 0x84
+ .byte 0
+ .ascii "Paradise"
+ .byte 0
+
+# Trident.
+trident_test:
+ movw $0x3c4, %dx
+ movb $0x0e, %al
+ outb %al, %dx
+ incw %dx
+ inb %dx, %al
+ xchgb %al, %ah
+ xorb %al, %al
+ outb %al, %dx
+ inb %dx, %al
+ xchgb %ah, %al
+ movb %al, %bl # Strange thing ... in the book this wasn't
+ andb $0x02, %bl # necessary but it worked on my card which
+ jz setb2 # is a trident. Without it the screen goes
+ # blurred ...
+ andb $0xfd, %al
+ jmp clrb2
+
+setb2: orb $0x02, %al
+clrb2: outb %al, %dx
+ andb $0x0f, %ah
+ cmpb $0x02, %ah
+ je istrid
+
+ xorw %bp, %bp
+istrid: ret
+
+trident_md:
+ .byte 0x50, 0x1e, 0x50
+ .byte 0x51, 0x2b, 0x50
+ .byte 0x52, 0x3c, 0x50
+ .byte 0x57, 0x19, 0x84
+ .byte 0x58, 0x1e, 0x84
+ .byte 0x59, 0x2b, 0x84
+ .byte 0x5a, 0x3c, 0x84
+ .byte 0
+ .ascii "Trident"
+ .byte 0
+
+# Tseng.
+tseng_test:
+ movw $0x3cd, %dx
+ inb %dx, %al # Could things be this simple ! :-)
+ movb %al, %bl
+ movb $0x55, %al
+ outb %al, %dx
+ inb %dx, %al
+ movb %al, %ah
+ movb %bl, %al
+ outb %al, %dx
+ cmpb $0x55, %ah
+ je istsen
+
+isnot: xorw %bp, %bp
+istsen: ret
+
+tseng_md:
+ .byte 0x26, 0x3c, 0x50
+ .byte 0x2a, 0x28, 0x64
+ .byte 0x23, 0x19, 0x84
+ .byte 0x24, 0x1c, 0x84
+ .byte 0x22, 0x2c, 0x84
+ .byte 0x21, 0x3c, 0x84
+ .byte 0
+ .ascii "Tseng"
+ .byte 0
+
+# Video7.
+video7_test:
+ movw $0x3cc, %dx
+ inb %dx, %al
+ movw $0x3b4, %dx
+ andb $0x01, %al
+ jz even7
+
+ movw $0x3d4, %dx
+even7: movb $0x0c, %al
+ outb %al, %dx
+ incw %dx
+ inb %dx, %al
+ movb %al, %bl
+ movb $0x55, %al
+ outb %al, %dx
+ inb %dx, %al
+ decw %dx
+ movb $0x1f, %al
+ outb %al, %dx
+ incw %dx
+ inb %dx, %al
+ movb %al, %bh
+ decw %dx
+ movb $0x0c, %al
+ outb %al, %dx
+ incw %dx
+ movb %bl, %al
+ outb %al, %dx
+ movb $0x55, %al
+ xorb $0xea, %al
+ cmpb %bh, %al
+ jne isnot
+
+ movb $VIDEO_FIRST_V7>>8, svga_prefix # Use special mode switching
+ ret
+
+video7_md:
+ .byte 0x40, 0x2b, 0x50
+ .byte 0x43, 0x3c, 0x50
+ .byte 0x44, 0x3c, 0x64
+ .byte 0x41, 0x19, 0x84
+ .byte 0x42, 0x2c, 0x84
+ .byte 0x45, 0x1c, 0x84
+ .byte 0
+ .ascii "Video 7"
+ .byte 0
+
+# Realtek VGA
+realtek_test:
+ leaw idrtvga, %si
+ movw $0x45, %di
+ movw $0x0b, %cx
+ repe
+ cmpsb
+ je isrt
+
+ xorw %bp, %bp
+isrt: ret
+
+idrtvga: .ascii "REALTEK VGA"
+
+realtek_md:
+ .byte 0x1a, 0x3c, 0x50
+ .byte 0x1b, 0x19, 0x84
+ .byte 0x1c, 0x1e, 0x84
+ .byte 0x1d, 0x2b, 0x84
+ .byte 0x1e, 0x3c, 0x84
+ .byte 0
+ .ascii "REALTEK"
+ .byte 0
+
+#endif /* CONFIG_VIDEO_SVGA */
+
+# User-defined local mode table (VGA only)
+#ifdef CONFIG_VIDEO_LOCAL
+local_modes:
+ leaw local_mode_table, %si
+locm1: lodsw
+ orw %ax, %ax
+ jz locm2
+
+ stosw
+ movsw
+ jmp locm1
+
+locm2: ret
+
+# This is the table of local video modes which can be supplied manually
+# by the user. Each entry consists of mode ID (word) and dimensions
+# (byte for column count and another byte for row count). These modes
+# are placed before all SVGA and VESA modes and override them if table
+# compacting is enabled. The table must end with a zero word followed
+# by NUL-terminated video adapter name.
+local_mode_table:
+ .word 0x0100 # Example: 40x25
+ .byte 25,40
+ .word 0
+ .ascii "Local"
+ .byte 0
+#endif /* CONFIG_VIDEO_LOCAL */
+
+# Read a key and return the ASCII code in al, scan code in ah
+getkey: xorb %ah, %ah
+ int $0x16
+ ret
+
+# Read a key with a timeout of 30 seconds.
+# The hardware clock is used to get the time.
+getkt: call gettime
+ addb $30, %al # Wait 30 seconds
+ cmpb $60, %al
+ jl lminute
+
+ subb $60, %al
+lminute:
+ movb %al, %cl
+again: movb $0x01, %ah
+ int $0x16
+ jnz getkey # key pressed, so get it
+
+ call gettime
+ cmpb %cl, %al
+ jne again
+
+ movb $0x20, %al # timeout, return `space'
+ ret
+
+# Flush the keyboard buffer
+flush: movb $0x01, %ah
+ int $0x16
+ jz empty
+
+ xorb %ah, %ah
+ int $0x16
+ jmp flush
+
+empty: ret
+
+# Print hexadecimal number.
+prthw: pushw %ax
+ movb %ah, %al
+ call prthb
+ popw %ax
+prthb: pushw %ax
+ shrb $4, %al
+ call prthn
+ popw %ax
+ andb $0x0f, %al
+prthn: cmpb $0x0a, %al
+ jc prth1
+
+ addb $0x07, %al
+prth1: addb $0x30, %al
+ jmp prtchr
+
+# Print decimal number in al
+prtdec: pushw %ax
+ pushw %cx
+ xorb %ah, %ah
+ movb $0x0a, %cl
+ idivb %cl
+ cmpb $0x09, %al
+ jbe lt100
+
+ call prtdec
+ jmp skip10
+
+lt100: addb $0x30, %al
+ call prtchr
+skip10: movb %ah, %al
+ addb $0x30, %al
+ call prtchr
+ popw %cx
+ popw %ax
+ ret
+
+store_edid:
+ pushw %es # just save all registers
+ pushw %ax
+ pushw %bx
+ pushw %cx
+ pushw %dx
+ pushw %di
+
+ pushw %fs
+ popw %es
+
+ movl $0x13131313, %eax # memset block with 0x13
+ movw $32, %cx
+ movw $0x140, %di
+ cld
+ rep
+ stosl
+
+ movw $0x4f15, %ax # do VBE/DDC
+ movw $0x01, %bx
+ movw $0x00, %cx
+ movw $0x01, %dx
+ movw $0x140, %di
+ int $0x10
+
+ popw %di # restore all registers
+ popw %dx
+ popw %cx
+ popw %bx
+ popw %ax
+ popw %es
+ ret
+
+# VIDEO_SELECT-only variables
+mt_end: .word 0 # End of video mode table if built
+edit_buf: .space 6 # Line editor buffer
+card_name: .word 0 # Pointer to adapter name
+scanning: .byte 0 # Performing mode scan
+do_restore: .byte 0 # Screen contents altered during mode change
+svga_prefix: .byte VIDEO_FIRST_BIOS>>8 # Default prefix for BIOS modes
+graphic_mode: .byte 0 # Graphic mode with a linear frame buffer
+dac_size: .byte 6 # DAC bit depth
+
+# Status messages
+keymsg: .ascii "Press <RETURN> to see video modes available, "
+ .ascii "<SPACE> to continue or wait 30 secs"
+ .byte 0x0d, 0x0a, 0
+
+listhdr: .byte 0x0d, 0x0a
+ .ascii "Mode: COLSxROWS:"
+
+crlft: .byte 0x0d, 0x0a, 0
+
+prompt: .byte 0x0d, 0x0a
+ .asciz "Enter mode number or `scan': "
+
+unknt: .asciz "Unknown mode ID. Try again."
+
+badmdt: .ascii "You passed an undefined mode number."
+ .byte 0x0d, 0x0a, 0
+
+vesaer: .ascii "Error: Scanning of VESA modes failed. Please "
+ .ascii "report to <mj@ucw.cz>."
+ .byte 0x0d, 0x0a, 0
+
+old_name: .asciz "CGA/MDA/HGA"
+
+ega_name: .asciz "EGA"
+
+svga_name: .ascii " "
+
+vga_name: .asciz "VGA"
+
+vesa_name: .asciz "VESA"
+
+name_bann: .asciz "Video adapter: "
+#endif /* CONFIG_VIDEO_SELECT */
+
+# Other variables:
+adapter: .byte 0 # Video adapter: 0=CGA/MDA/HGA,1=EGA,2=VGA
+video_segment: .word 0xb800 # Video memory segment
+force_size: .word 0 # Use this size instead of the one in BIOS vars
--- /dev/null
+extra-y := head.o head64.o init_task.o vmlwk.lds
+EXTRA_FLAGS := -traditional
+obj-y := percpu.o setup.o e820.o cpuinfo.o resource.o \
+ mpparse.o entry.o show.o syscall.o i387.o cpu.o \
+ lapic.o ioapic.o trampoline.o interrupts.o mpboot.o \
+ time.o sys_arch_prctl.o vsyscall.o xcall.o \
+ task.o sched.o
+
+obj-$(CONFIG_CRAY_XT) += rca/
--- /dev/null
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data.
+ */
+
+#if 0
+#include <linux/sched.h>
+#include <linux/stddef.h>
+#include <linux/errno.h>
+#include <linux/hardirq.h>
+#include <linux/suspend.h>
+#include <asm/pda.h>
+#include <asm/processor.h>
+#include <asm/segment.h>
+#include <asm/thread_info.h>
+#endif
+
+#include <lwk/task.h>
+#include <arch/pda.h>
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
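+
+/*
+ * Rough sketch of how these markers are consumed (the exact post-processing
+ * rule lives in the build scripts, not here): compiling this file to
+ * assembly yields lines such as
+ *
+ *   ->tsk_id $8 offsetof(struct task_struct, id)
+ *
+ * (the numeric value is invented for illustration), which are then turned
+ * into plain "#define tsk_id 8" constants that assembly sources can include.
+ */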
+
+/**
+ * This is used to automatically count the number of system calls.
+ * A table is generated with one entry for each system call defined in
+ * arch/unistd.h, which contains the list of system calls.
+ */
+#define __NO_STUBS 1
+#undef __SYSCALL
+#undef _ARCH_X86_64_UNISTD_H
+#define __SYSCALL(nr, sym) [nr] = 1,
+static char syscalls[] = {
+#include <arch/unistd.h>
+};
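+
+/*
+ * Illustrative expansion (syscall names and numbers invented for the
+ * example): if arch/unistd.h defined __SYSCALL(0, read) and
+ * __SYSCALL(1, write), the array above would become
+ *
+ *   static char syscalls[] = { [0] = 1, [1] = 1, };
+ *
+ * so sizeof(syscalls) - 1, used below, is the highest syscall number.
+ */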
+
+int main(void)
+{
+#define ENTRY(entry) DEFINE(tsk_ ## entry, offsetof(struct task_struct, entry))
+#if 0
+ ENTRY(state);
+ ENTRY(flags);
+ ENTRY(thread);
+#endif
+ ENTRY(id);
+ BLANK();
+#undef ENTRY
+#define ENTRY(entry) DEFINE(tsk_arch_ ## entry, offsetof(struct task_struct, arch) + offsetof(struct arch_task, entry))
+ ENTRY(flags);
+ ENTRY(addr_limit);
+ BLANK();
+#undef ENTRY
+#define ENTRY(entry) DEFINE(tsk_arch_ ## entry, offsetof(struct task_struct, arch) + offsetof(struct arch_task, thread) + offsetof(struct thread_struct, entry))
+ ENTRY(rsp);
+ BLANK();
+#undef ENTRY
+#define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
+ ENTRY(kernelstack);
+ ENTRY(oldrsp);
+ ENTRY(pcurrent);
+ ENTRY(irqcount);
+ ENTRY(cpunumber);
+ ENTRY(irqstackptr);
+ ENTRY(data_offset);
+ BLANK();
+#undef ENTRY
+#if 0
+ DEFINE(pbe_address, offsetof(struct pbe, address));
+ DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
+ DEFINE(pbe_next, offsetof(struct pbe, next));
+ BLANK();
+ DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
+#endif
+ BLANK();
+ DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);
+ return 0;
+}
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/init.h>
+#include <lwk/task.h>
+#include <lwk/cpu.h>
+#include <lwk/ptrace.h>
+#include <lwk/string.h>
+#include <lwk/delay.h>
+#include <arch/processor.h>
+#include <arch/desc.h>
+#include <arch/proto.h>
+#include <arch/i387.h>
+#include <arch/apic.h>
+#include <arch/tsc.h>
+
+/**
+ * Bitmap of CPUs that have been initialized.
+ */
+static cpumask_t cpu_initialized_map;
+
+/**
+ * Memory for STACKFAULT stacks, one for each CPU.
+ */
+char stackfault_stack[NR_CPUS][PAGE_SIZE]
+__attribute__((section(".bss.page_aligned")));
+
+/**
+ * Memory for DOUBLEFAULT stacks, one for each CPU.
+ */
+char doublefault_stack[NR_CPUS][PAGE_SIZE]
+__attribute__((section(".bss.page_aligned")));
+
+/**
+ * Memory for NMI stacks, one for each CPU.
+ */
+char nmi_stack[NR_CPUS][PAGE_SIZE]
+__attribute__((section(".bss.page_aligned")));
+
+/**
+ * Memory for DEBUG stacks, one for each CPU.
+ */
+char debug_stack[NR_CPUS][PAGE_SIZE]
+__attribute__((section(".bss.page_aligned")));
+
+/**
+ * Memory for MCE stacks, one for each CPU.
+ */
+char mce_stack[NR_CPUS][PAGE_SIZE]
+__attribute__((section(".bss.page_aligned")));
+
+/**
+ * Initializes the calling CPU's Per-CPU Data Area (PDA).
+ * When in kernel mode, each CPU's GS.base is loaded with the address of the
+ * CPU's PDA. This allows data in the PDA to be accessed using segment relative
+ * accesses, like:
+ *
+ * movq %gs:pcurrent, %rdi // move CPU's current task pointer to %rdi
+ *
+ * This is similar to thread-local data for user-level programs.
+ */
+void __init
+pda_init(unsigned int cpu, struct task_struct *task)
+{
+ struct x8664_pda *pda = cpu_pda(cpu);
+
+ /*
+ * Point FS and GS at the NULL segment descriptor (entry 0) in the GDT.
+ * x86_64 does away with a lot of segmentation cruftiness... there's no
+ * need to set up specific GDT entries for FS or GS.
+ */
+ asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
+
+ /*
+ * Load the address of this CPU's PDA into this CPU's GS_BASE model
+ * specific register. Upon entry to the kernel, the SWAPGS instruction
+ * is used to load the value from MSR_GS_BASE into the GS segment
+ * register's base address (GS.base). The user-level GS.base value
+ * is stored in MSR_GS_BASE. When the kernel is exited, SWAPGS is
+ * called again.
+ */
+ mb();
+ wrmsrl(MSR_GS_BASE, pda);
+ mb();
+
+ pda->cpunumber = cpu;
+ pda->pcurrent = task;
+ pda->active_aspace = task->aspace;
+ pda->kernelstack = (unsigned long)task - PDA_STACKOFFSET + TASK_SIZE;
+ pda->mmu_state = 0;
+}
+
+/**
+ * Initializes the calling CPU's Control Register 4 (CR4).
+ * The bootstrap assembly code has already partially set up this register.
+ * We only modify the bits we care about, leaving the others untouched.
+ */
+static void __init
+cr4_init(void)
+{
+ clear_in_cr4(
+ X86_CR4_VME | /* Disable Virtual-8086 support/cruft */
+ X86_CR4_PVI | /* Disable support for VIF flag */
+ X86_CR4_TSD | /* Allow RDTSC instruction at user-level */
+ X86_CR4_DE /* Disable debugging extensions */
+ );
+}
+
+/**
+ * Initializes and installs the calling CPU's Global Descriptor Table (GDT).
+ * Each CPU has its own GDT.
+ */
+static void __init
+gdt_init(void)
+{
+ unsigned int cpu = this_cpu;
+
+ /* The bootstrap CPU's GDT has already been setup */
+ if (cpu != 0)
+ memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
+ cpu_gdt_descr[cpu].size = GDT_SIZE;
+
+ /* Install the CPU's GDT */
+ asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu]));
+
+ /*
+ * Install the CPU's LDT... Local Descriptor Table.
+ * We have no need for a LDT, so we point it at the NULL descriptor.
+ */
+ asm volatile("lldt %w0":: "r" (0));
+}
+
+/**
+ * Installs the calling CPU's Interrupt Descriptor Table (IDT).
+ * All CPUs share the same IDT.
+ */
+static void __init
+idt_init(void)
+{
+ /*
+ * The bootstrap CPU has already filled in the IDT table via the
+ * interrupts_init() call in setup.c. All we need to do is tell the CPU
+ * about it.
+ */
+ asm volatile("lidt %0" :: "m" (idt_descr));
+}
+
+/**
+ * Initializes and installs the calling CPU's Task State Segment (TSS).
+ */
+static void __init
+tss_init(void)
+{
+ unsigned int cpu = this_cpu;
+ struct tss_struct *tss = &per_cpu(tss, cpu);
+ int i;
+
+ /*
+ * Initialize the CPU's Interrupt Stack Table.
+ * Certain exceptions and interrupts are handled with their own,
+ * known good stack. The IST holds the address of these stacks.
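+ * For example, the NMI IDT gate can be given IST index NMI_STACK, so an
+ * NMI always runs on this CPU's nmi_stack page no matter what the
+ * interrupted context's stack pointer was.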
+ */
+ tss->ist[STACKFAULT_STACK-1] = (unsigned long)&stackfault_stack[cpu][0];
+ tss->ist[DOUBLEFAULT_STACK-1] = (unsigned long)&doublefault_stack[cpu][0];
+ tss->ist[NMI_STACK-1] = (unsigned long)&nmi_stack[cpu][0];
+ tss->ist[DEBUG_STACK-1] = (unsigned long)&debug_stack[cpu][0];
+ tss->ist[MCE_STACK-1] = (unsigned long)&mce_stack[cpu][0];
+
+ /*
+ * Initialize the CPU's I/O permission bitmap.
+ * The <= is required because the CPU will access up to 8 bits beyond
+ * the end of the IO permission bitmap.
+ */
+ tss->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+ for (i = 0; i <= IO_BITMAP_LONGS; i++)
+ tss->io_bitmap[i] = ~0UL;
+
+ /*
+ * Install the CPU's TSS and load the CPU's Task Register (TR).
+ * Each CPU has its own TSS.
+ */
+ set_tss_desc(cpu, tss);
+ asm volatile("ltr %w0":: "r" (GDT_ENTRY_TSS*8));
+}
+
+/**
+ * Initializes various Model Specific Registers (MSRs) of the calling CPU.
+ */
+static void __init
+msr_init(void)
+{
+ /*
+ * Setup the MSRs needed to support the SYSCALL and SYSRET
+ * instructions. Really, you should read the manual to understand these
+ * gems. In summary, STAR and LSTAR specify the CS, SS, and RIP to
+ * install when the SYSCALL instruction is issued. They also specify the
+ * CS and SS to install on SYSRET.
+ *
+ * On SYSCALL, the x86_64 CPU control unit uses STAR to load CS and SS and
+ * LSTAR to load RIP. The old RIP is saved in RCX.
+ *
+ * On SYSRET, the control unit uses STAR to restore CS and SS.
+ * RIP is loaded from RCX.
+ *
+ * SYSCALL_MASK specifies the RFLAGS to clear on SYSCALL.
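+ *
+ * For reference, MSR_STAR is laid out as follows (see the x86_64
+ * architecture manuals):
+ *   [63:48] SYSRET selector base (CS = base+16, SS = base+8 in 64-bit mode)
+ *   [47:32] SYSCALL selector base (CS = base, SS = base+8)
+ *   [31:0]  legacy 32-bit SYSCALL entry point (unused in long mode)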
+ */
+ wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | /* SYSRET CS+SS */
+ ((u64)__KERNEL_CS)<<32); /* SYSCALL CS+SS */
+ wrmsrl(MSR_LSTAR, asm_syscall); /* SYSCALL RIP */
+ wrmsrl(MSR_CSTAR, asm_syscall_ignore); /* RIP for compat. mode */
+ wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000);
+
+ /*
+ * Setup MSRs needed to support the PDA.
+ * pda_init() initialized MSR_GS_BASE already. When the SWAPGS
+ * instruction is issued, the x86_64 control unit atomically swaps
+ * MSR_GS_BASE and MSR_KERNEL_GS_BASE. So, when we call SWAPGS to
+ * exit the kernel, the value in MSR_KERNEL_GS_BASE will be loaded.
+ * User-space will see MSR_FS_BASE and MSR_GS_BASE both set to 0.
+ */
+ wrmsrl(MSR_FS_BASE, 0);
+ wrmsrl(MSR_KERNEL_GS_BASE, 0);
+}
+
+/**
+ * Initializes the calling CPU's debug registers.
+ */
+static void __init
+dbg_init(void)
+{
+ /*
+ * Clear the CPU's debug registers.
+ * DR[0-3] are Address-Breakpoint Registers
+ * DR[4-5] are reserved and should not be used by software
+ * DR6 is the Debug Status Register
+ * DR7 is the Debug Control Register
+ */
+ set_debugreg(0UL, 0);
+ set_debugreg(0UL, 1);
+ set_debugreg(0UL, 2);
+ set_debugreg(0UL, 3);
+ set_debugreg(0UL, 6);
+ set_debugreg(0UL, 7);
+}
+
+void __init
+cpu_init(void)
+{
+ /*
+ * Get a reference to the currently executing task and the ID of the
+ * CPU being initialized. We can't use the normal 'current' mechanism
+ * since it relies on the PDA being initialized, which it isn't for all
+ * CPUs other than the boot CPU (id=0). pda_init() is called below.
+ */
+ struct task_struct *me = get_current_via_RSP();
+ unsigned int cpu = me->cpu_id; /* logical ID */
+
+ if (cpu_test_and_set(cpu, cpu_initialized_map))
+ panic("CPU#%u already initialized!\n", cpu);
+ printk(KERN_DEBUG "Initializing CPU#%u\n", cpu);
+
+ pda_init(cpu, me); /* per-cpu data area */
+ identify_cpu(); /* determine cpu features via CPUID */
+ cr4_init(); /* control register 4 */
+ gdt_init(); /* global descriptor table */
+ idt_init(); /* interrupt descriptor table */
+ tss_init(); /* task state segment */
+ msr_init(); /* misc. model specific registers */
+ dbg_init(); /* debug registers */
+ fpu_init(); /* floating point unit */
+ lapic_init(); /* local advanced prog. interrupt controller */
+ time_init(); /* detects CPU frequency, udelay(), etc. */
+ barrier(); /* compiler memory barrier, avoids reordering */
+}
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/init.h>
+#include <lwk/smp.h>
+#include <lwk/cpuinfo.h>
+#include <lwk/aspace.h>
+#include <arch/processor.h>
+#include <arch/proto.h>
+
+/**
+ * Information about the boot CPU.
+ * The CPU capabilities stored in this structure are the lowest common
+ * denominator for all CPUs in the system... in this sense, boot_cpu_data
+ * is special compared to the corresponding entry in the cpu_info[] array.
+ */
+struct cpuinfo boot_cpu_data;
+
+/**
+ * On AMD multi-core CPUs, the lower bits of the local APIC ID distinguish the
+ * cores. This function assumes the number of cores is a power of two.
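+ *
+ * Example (illustrative): on a quad-core package, bits = 2, so a CPU with
+ * physical_id 0x6 (binary 0110) is core 2 of socket 1.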
+ */
+static void __init
+amd_detect_cmp(struct cpuinfo *c)
+{
+ struct arch_cpuinfo *a = &c->arch;
+ unsigned bits;
+ unsigned ecx = cpuid_ecx(0x80000008);
+
+ a->x86_pkg_cores = (ecx & 0xff) + 1;
+
+ /* CPU telling us the core id bits shift? */
+ bits = (ecx >> 12) & 0xF;
+
+ /* Otherwise recompute */
+ if (bits == 0) {
+ while ((1 << bits) < a->x86_pkg_cores)
+ bits++;
+ }
+
+ /* Determine the physical socket ID */
+ c->phys_socket_id = c->physical_id >> bits;
+
+ /* Determine the physical core ID (index of core in socket) */
+ c->phys_core_id = c->physical_id & ((1 << bits)-1);
+}
+
+static void __init
+amd_cpu(struct cpuinfo *c)
+{
+ unsigned level;
+ unsigned long value;
+ unsigned int eax, ebx, ecx, edx;
+ struct arch_cpuinfo *a = &c->arch;
+
+ /*
+ * Disable TLB flush filter by setting HWCR.FFDIS on K8
+ * bit 6 of msr C001_0015
+ *
+ * Errata 63 for SH-B3 steppings
+ * Errata 122 for all steppings (F+ have it disabled by default)
+ */
+ if (a->x86_family == 15) {
+ rdmsrl(MSR_K8_HWCR, value);
+ value |= 1 << 6;
+ wrmsrl(MSR_K8_HWCR, value);
+ }
+
+ /*
+ * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+ * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
+ */
+ clear_bit(0*32+31, &a->x86_capability);
+
+ /* On C+ stepping K8 rep microcode works well for copy/memset */
+ level = cpuid_eax(1);
+ if (a->x86_family == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
+ set_bit(X86_FEATURE_REP_GOOD, &a->x86_capability);
+ if (a->x86_family == 0x10)
+ set_bit(X86_FEATURE_REP_GOOD, &a->x86_capability);
+
+ /* Enable workaround for FXSAVE leak */
+ if (a->x86_family >= 6)
+ set_bit(X86_FEATURE_FXSAVE_LEAK, &a->x86_capability);
+
+ /* Determine L1 Cache and TLB Information */
+ if (a->extended_cpuid_level >= 0x80000005) {
+ cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
+
+ /* 2-MB L1 TLB (inclusive with L2 TLB) */
+ a->x86_tlb_size[INST][L1][PAGE_2MB] = (eax & 0xff);
+ a->x86_tlb_size[DATA][L1][PAGE_2MB] = ((eax >> 16) & 0xff);
+
+ /* 4-KB L1 TLB (inclusive with L2 TLB) */
+ a->x86_tlb_size[INST][L1][PAGE_4KB] = (ebx & 0xff);
+ a->x86_tlb_size[DATA][L1][PAGE_4KB] = ((ebx >> 16) & 0xff);
+
+ /* L1 Instruction Cache */
+ a->x86_cache_size[INST][L1] = (edx >> 24);
+ a->x86_cache_line[INST][L1] = (edx & 0xff);
+
+ /* L1 Data Cache */
+ a->x86_cache_size[DATA][L1] = (ecx >> 24);
+ a->x86_cache_line[DATA][L1] = (ecx & 0xff);
+ }
+
+ /* Determine L2 Cache and TLB Information */
+ if (a->extended_cpuid_level >= 0x80000006) {
+ cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
+
+ /* 2-MB L2 TLB */
+ if ((eax & 0xffff0000) == 0) {
+ /* Unified I+D 2-MB L2 TLB */
+ a->x86_tlb_size[UNIF][L2][PAGE_2MB] = eax & 0xfff;
+ } else {
+ a->x86_tlb_size[INST][L2][PAGE_2MB] = eax & 0xfff;
+ a->x86_tlb_size[DATA][L2][PAGE_2MB] = (eax>>16) & 0xfff;
+ }
+
+ /* 4-KB L2 TLB */
+ if ((ebx & 0xffff0000) == 0) {
+ /* Unified I+D 4-KB L2 TLB */
+ a->x86_tlb_size[UNIF][L2][PAGE_4KB] = ebx & 0xfff;
+ } else {
+ a->x86_tlb_size[INST][L2][PAGE_4KB] = ebx & 0xfff;
+ a->x86_tlb_size[DATA][L2][PAGE_4KB] = (ebx>>16) & 0xfff;
+ }
+
+ /* Unified L2 Cache */
+ a->x86_cache_size[UNIF][L2] = ecx >> 16;
+ a->x86_cache_line[UNIF][L2] = ecx & 0xff;
+ }
+
+ /* Determine Advanced Power Management Features */
+ if (a->extended_cpuid_level >= 0x80000007) {
+ a->x86_power = cpuid_edx(0x80000007);
+ }
+
+ /* Determine Maximum Address Sizes */
+ if (a->extended_cpuid_level >= 0x80000008) {
+ cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
+ a->x86_virt_bits = (eax >> 8) & 0xff;
+ a->x86_phys_bits = eax & 0xff;
+ }
+
+ /* a->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
+ if (a->x86_power & (1<<8))
+ set_bit(X86_FEATURE_CONSTANT_TSC, &a->x86_capability);
+
+ /* Multi core CPU? */
+ if (a->extended_cpuid_level >= 0x80000008)
+ amd_detect_cmp(c);
+
+ if (a->x86_family == 0xf || a->x86_family == 0x10 || a->x86_family == 0x11)
+ set_bit(X86_FEATURE_K8, &a->x86_capability);
+
+ /* RDTSC can be speculated around */
+ clear_bit(X86_FEATURE_SYNC_RDTSC, &a->x86_capability);
+
+ /* Family 10 doesn't support C states in MWAIT so don't use it */
+ if (a->x86_family == 0x10)
+ clear_bit(X86_FEATURE_MWAIT, &a->x86_capability);
+}
+
+static void __init
+intel_cpu(struct cpuinfo *c)
+{
+ /* TODO */
+}
+
+/*
+ * Do some early CPUID on the boot CPU to get the parameters that are
+ * needed before check_bugs(). Everything advanced is in identify_cpu()
+ * below.
+ */
+void __init
+early_identify_cpu(struct cpuinfo *c)
+{
+ struct arch_cpuinfo *a = &c->arch;
+ uint32_t tfms;
+ uint32_t misc;
+
+ /*
+ * Zero structure, except apic_id should have already been filled in.
+ */
+ uint8_t apic_id = a->apic_id;
+ memset(a, 0, sizeof(*a));
+ a->apic_id = apic_id;
+
+ /*
+ * Set some defaults to begin with.
+ */
+ a->x86_vendor_id[0] = '\0'; /* Unset */
+ a->x86_model_id[0] = '\0'; /* Unset */
+ a->x86_clflush_size = 64;
+ a->x86_pkg_cores = 1;
+ a->max_cpu_khz = 1000000; /* start out with 1 GHz */
+ a->min_cpu_khz = a->max_cpu_khz;
+ a->cur_cpu_khz = a->max_cpu_khz;
+ a->tsc_khz = a->max_cpu_khz;
+ memset(&a->x86_capability, 0, sizeof(a->x86_capability));
+
+ /* Determine the CPU vendor */
+ cpuid(0x00000000, &a->cpuid_level,
+ (unsigned int *)&a->x86_vendor_id[0],
+ (unsigned int *)&a->x86_vendor_id[8],
+ (unsigned int *)&a->x86_vendor_id[4]);
+
+ /* Derive the vendor ID from the vendor string */
+ if (!strcmp(a->x86_vendor_id, "AuthenticAMD"))
+ a->x86_vendor = X86_VENDOR_AMD;
+ else if (!strcmp(a->x86_vendor_id, "GenuineIntel"))
+ a->x86_vendor = X86_VENDOR_INTEL;
+ else
+ a->x86_vendor = X86_VENDOR_UNKNOWN;
+
+ if (a->cpuid_level == 0)
+ panic("CPU only has CPUID level 0... is your CPU ancient?");
+
+ /*
+ * Determine Intel-defined CPU features and other standard info.
+ * NOTE: Vendor-specific code may override these later.
+ */
+ cpuid(0x00000001,
+ &tfms, /* type, family, model, stepping */
+ &misc, /* brand, cflush sz, logical cpus, apic id */
+ &a->x86_capability[4], /* extended cpu features */
+ &a->x86_capability[0] /* cpu features */
+ );
+
+ /* Determine the CPU family */
+ a->x86_family = (tfms >> 8) & 0xf;
+ if (a->x86_family == 0xf)
+ a->x86_family += ((tfms >> 20) & 0xff);
+
+ /* Determine the CPU model */
+ a->x86_model = (tfms >> 4) & 0xf;
+ if (a->x86_family >= 0x6)
+ a->x86_model += (((tfms >> 16) & 0xf) << 4);
+
+ /* Determine the CPU stepping */
+ a->x86_stepping = tfms & 0xf;
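+
+ /*
+ * Worked example (illustrative): tfms = 0x00100f42 decodes to
+ * family 0xf + 0x01 = 0x10, model 0x4, stepping 0x2.
+ */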
+
+ /* Determine the CLFLUSH size, if the CPU supports CLFLUSH */
+ if (a->x86_capability[0] & (1 << X86_FEATURE_CLFLSH))
+ a->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+
+ /*
+ * Determine the CPU's initial local APIC ID.
+ * NOTE: The BIOS may change the CPU's Local APIC ID before
+ * passing control to the OS kernel, however the value
+ * reported by CPUID will never change. The initial APIC
+ * ID can sometimes be used to discover CPU topology.
+ */
+ a->initial_lapic_id = (misc >> 24) & 0xff;
+
+ /* TODO: determine page sizes supported via CPUID */
+ c->pagesz_mask = (VM_PAGE_4KB | VM_PAGE_2MB);
+}
+
+/*
+ * This does the hard work of actually picking apart the CPU stuff...
+ */
+void __init
+identify_cpu(void)
+{
+ int i;
+ struct cpuinfo *c = &cpu_info[this_cpu];
+ struct arch_cpuinfo *a = &c->arch;
+
+ early_identify_cpu(c);
+
+ /* Determine the extended CPUID level */
+ a->extended_cpuid_level = cpuid_eax(0x80000000);
+
+ /* Parse extended CPUID information */
+ if ((a->extended_cpuid_level & 0xffff0000) == 0x80000000) {
+ /* Determine AMD-defined CPU features: level 0x80000001 */
+ if (a->extended_cpuid_level >= 0x80000001) {
+ a->x86_capability[1] = cpuid_edx(0x80000001);
+ a->x86_capability[6] = cpuid_ecx(0x80000001);
+ }
+
+ /* Determine processor brand/model string */
+ if (a->extended_cpuid_level >= 0x80000004) {
+ unsigned int *v = (unsigned int *) a->x86_model_id;
+ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
+ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
+ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
+ a->x86_model_id[48] = '\0';
+ } else {
+ strcpy(a->x86_model_id, "Unknown x86-64 Model");
+ }
+ }
+
+ /*
+ * Vendor-specific initialization. In this section we
+ * canonicalize the feature flags, meaning if there are
+ * features a certain CPU supports which CPUID doesn't
+ * tell us, CPUID claiming incorrect flags, or other bugs,
+ * we handle them here.
+ *
+ * At the end of this section, c->x86_capability better
+ * indicate the features this CPU genuinely supports!
+ */
+ switch (a->x86_vendor) {
+ case X86_VENDOR_AMD:
+ amd_cpu(c);
+ break;
+
+ case X86_VENDOR_INTEL:
+ intel_cpu(c);
+ break;
+
+ case X86_VENDOR_UNKNOWN:
+ default:
+ panic("Unknown x86 CPU Vendor.");
+ }
+
+ /*
+ * boot_cpu_data holds the common feature set between
+ * all CPUs; so make sure that we indicate which features are
+ * common between the CPUs. The first time this routine gets
+ * executed, c == &boot_cpu_data.
+ */
+ if (c != &boot_cpu_data) {
+ /* AND the already accumulated flags with these */
+ for (i = 0 ; i < NCAPINTS ; i++)
+ boot_cpu_data.arch.x86_capability[i] &= c->arch.x86_capability[i];
+ }
+}
+
+/**
+ * Prints architecture specific CPU information to the console.
+ */
+void
+print_arch_cpuinfo(struct cpuinfo *c)
+{
+ int i;
+ struct arch_cpuinfo *a = &c->arch;
+ char buf[1024];
+
+ /*
+ * These flag bits must match the definitions in <arch/cpufeature.h>.
+ * NULL means this bit is undefined or reserved; either way it doesn't
+ * have meaning as far as the kernel is concerned.
+ */
+ static char *x86_cap_flags[] = {
+ /* Intel-defined */
+ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
+ "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
+ "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
+ "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
+
+ /* AMD-defined */
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
+ NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
+ "3dnowext", "3dnow",
+
+ /* Transmeta-defined */
+ "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* Other (Linux-defined) */
+ "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
+ NULL, NULL, NULL, NULL,
+ "constant_tsc", "up", NULL, "arch_perfmon",
+ "pebs", "bts", NULL, "sync_rdtsc",
+ "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* Intel-defined (#2) */
+ "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
+ "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
+ NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* VIA/Cyrix/Centaur-defined */
+ NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
+ "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* AMD-defined (#2) */
+ "lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy",
+ "altmovcr8", "abm", "sse4a",
+ "misalignsse", "3dnowprefetch",
+ "osvw", "ibs", NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* Auxiliary (Linux-defined) */
+ "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ };
+ static char *x86_power_flags[] = {
+ "ts", /* temperature sensor */
+ "fid", /* frequency id control */
+ "vid", /* voltage id control */
+ "ttp", /* thermal trip */
+ "tm",
+ "stc",
+ "100mhzsteps",
+ "hwpstate",
+ "", /* tsc invariant mapped to constant_tsc */
+ /* nothing */
+ };
+
+ printk(KERN_DEBUG " Vendor: %s\n", a->x86_vendor_id);
+ printk(KERN_DEBUG " Family: %u\n", a->x86_family);
+ printk(KERN_DEBUG " Model: %u (%s)\n", a->x86_model, a->x86_model_id);
+ printk(KERN_DEBUG " Stepping: %u\n", a->x86_stepping);
+ printk(KERN_DEBUG " Frequency: %u.%03u MHz (max=%u.%03u, min=%u.%03u)\n",
+ a->cur_cpu_khz / 1000, (a->cur_cpu_khz % 1000),
+ a->max_cpu_khz / 1000, (a->max_cpu_khz % 1000),
+ a->min_cpu_khz / 1000, (a->min_cpu_khz % 1000));
+
+ /* L1 Cache Info */
+ if (a->x86_cache_size[UNIF][L1] == 0) {
+ printk(KERN_DEBUG " L1 Cache: I=%u KB, D=%u KB, line size=%u bytes\n",
+ a->x86_cache_size[INST][L1],
+ a->x86_cache_size[DATA][L1],
+ a->x86_cache_line[DATA][L1]);
+ } else {
+ printk(KERN_DEBUG " L1 Cache: %u KB (unified I+D), line size=%u bytes\n",
+ a->x86_cache_size[UNIF][L1],
+ a->x86_cache_line[UNIF][L1]);
+ }
+
+ /* L2 Cache Info */
+ if (a->x86_cache_size[UNIF][L2] == 0) {
+ printk(KERN_DEBUG " L2 Cache: I=%u KB, D=%u KB, line size=%u bytes\n",
+ a->x86_cache_size[INST][L2],
+ a->x86_cache_size[DATA][L2],
+ a->x86_cache_line[DATA][L2]);
+ } else {
+ printk(KERN_DEBUG " L2 Cache: %u KB (unified I+D), line size=%u bytes\n",
+ a->x86_cache_size[UNIF][L2],
+ a->x86_cache_line[UNIF][L2]);
+ }
+
+ /* 4-KB Page TLB Info */
+ printk(KERN_DEBUG " 4-KB TLB: I=%u/%u entries D=%d/%d entries\n",
+ a->x86_tlb_size[INST][L1][PAGE_4KB],
+ a->x86_tlb_size[INST][L2][PAGE_4KB],
+ a->x86_tlb_size[DATA][L1][PAGE_4KB],
+ a->x86_tlb_size[DATA][L2][PAGE_4KB]
+ );
+
+ /* 2-MB Page TLB Info */
+ printk(KERN_DEBUG " 2-MB TLB: I=%u/%u entries D=%d/%d entries\n",
+ a->x86_tlb_size[INST][L1][PAGE_2MB],
+ a->x86_tlb_size[INST][L2][PAGE_2MB],
+ a->x86_tlb_size[DATA][L1][PAGE_2MB],
+ a->x86_tlb_size[DATA][L2][PAGE_2MB]
+ );
+
+ /* 1-GB Page TLB Info */
+ printk(KERN_DEBUG " 1-GB TLB: I=%u/%u entries D=%d/%d entries\n",
+ a->x86_tlb_size[INST][L1][PAGE_1GB],
+ a->x86_tlb_size[INST][L2][PAGE_1GB],
+ a->x86_tlb_size[DATA][L1][PAGE_1GB],
+ a->x86_tlb_size[DATA][L2][PAGE_1GB]
+ );
+
+ /* Address bits */
+ printk(KERN_DEBUG " Address bits: %u bits physical, %u bits virtual\n",
+ a->x86_phys_bits,
+ a->x86_virt_bits);
+
+ /* Bytes flushed by CLFLUSH instruction */
+ printk(KERN_DEBUG " CLFLUSH size: %u bytes\n", a->x86_clflush_size);
+
+ /* CPU Features */
+ buf[0] = '\0';
+ for (i = 0; i < 32*NCAPINTS; i++) {
+ if (cpu_has(c, i) && x86_cap_flags[i] != NULL) {
+ strcat(buf, x86_cap_flags[i]);
+ strcat(buf, " ");
+ }
+ }
+ printk(KERN_DEBUG " CPU Features: %s\n", buf);
+
+ /* Power Management Features */
+ if (a->x86_power == 0) {
+ strcpy(buf, "none");
+ } else {
+ buf[0] = '\0';
+ for (i = 0; i < 32; i++) {
+ /* Only report bits that are actually set in x86_power */
+ if (!(a->x86_power & (1 << i)))
+ continue;
+ if ((i < ARRAY_SIZE(x86_power_flags)) && x86_power_flags[i]) {
+ strcat(buf, x86_power_flags[i]);
+ strcat(buf, " ");
+ } else {
+ char bit_str[7];
+ bit_str[0] = '\0';
+ sprintf(bit_str, "[%d] ", i);
+ strcat(buf, bit_str);
+ }
+ }
+ }
+ printk(KERN_DEBUG " Power Features: %s\n", buf);
+}
+
--- /dev/null
+/*
+ * Handle the memory map.
+ * The functions here do the job until bootmem takes over.
+ * $Id: e820.c,v 1.4 2002/09/19 19:25:32 ak Exp $
+ *
+ * Getting sanitize_e820_map() in sync with i386 version by applying change:
+ * - Provisions for empty E820 memory regions (reported by certain BIOSes).
+ * Alex Achenbach <xela@slit.de>, December 2002.
+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ *
+ */
+#include <lwk/kernel.h>
+#include <lwk/types.h>
+#include <lwk/init.h>
+#include <lwk/bootmem.h>
+#include <lwk/resource.h>
+#include <lwk/string.h>
+#include <lwk/linux_compat.h>
+
+#include <arch/page.h>
+#include <arch/pgtable.h>
+#include <arch/e820.h>
+#include <arch/proto.h>
+#include <arch/bootsetup.h>
+#include <arch/sections.h>
+
+/**
+ * The BIOS "e820" map of memory.
+ */
+struct e820map e820;
+
+/*
+ * PFN of last memory page.
+ */
+unsigned long end_pfn;
+EXPORT_SYMBOL(end_pfn);
+
+/*
+ * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
+ * The direct mapping extends to end_pfn_map, so that we can directly access
+ * apertures, ACPI and other tables without having to play with fixmaps.
+ */
+unsigned long end_pfn_map;
+
+/*
+ * Last pfn which the user wants to use.
+ */
+unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;
+
+extern struct resource code_resource, data_resource;
+
+/* Check for some hardcoded bad areas that early boot is not allowed to touch */
+static inline int bad_addr(unsigned long *addrp, unsigned long size)
+{
+ unsigned long addr = *addrp, last = addr + size;
+
+ /* various gunk below that is needed for SMP startup */
+ if (addr < 0x8000) {
+ *addrp = 0x8000;
+ return 1;
+ }
+
+ /* direct mapping tables of the kernel */
+ if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) {
+ *addrp = table_end << PAGE_SHIFT;
+ return 1;
+ }
+
+ /* initrd image */
+ if (LOADER_TYPE && INITRD_START && last >= INITRD_START &&
+ addr < INITRD_START+INITRD_SIZE) {
+ *addrp = INITRD_START + INITRD_SIZE;
+ return 1;
+ }
+
+ /* kernel code + 640k memory hole (later should not be needed, but
+ be paranoid for now) */
+ if (last >= 640*1024 && addr < __pa_symbol(&_end)) {
+ *addrp = __pa_symbol(&_end);
+ return 1;
+ }
+
+ if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
+ *addrp = ebda_addr + ebda_size;
+ return 1;
+ }
+
+ /* XXX ramdisk image here? */
+ return 0;
+}
+
+/*
+ * This function checks if any part of the range <start,end> is mapped
+ * with type.
+ */
+int __init
+e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
+{
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ if (type && ei->type != type)
+ continue;
+ if (ei->addr >= end || ei->addr + ei->size <= start)
+ continue;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * This function checks if the entire range <start,end> is mapped with type.
+ *
+ * Note: this function only works correctly if the e820 table is sorted and
+ * non-overlapping, which is the case.
+ */
+int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
+{
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ if (type && ei->type != type)
+ continue;
+ /* is the region (part) in overlap with the current region ?*/
+ if (ei->addr >= end || ei->addr + ei->size <= start)
+ continue;
+
+ /* if the region is at the beginning of <start,end> we move
+ * start to the end of the region since it's ok until there
+ */
+ if (ei->addr <= start)
+ start = ei->addr + ei->size;
+ /* if start is now at or beyond end, we're done, full coverage */
+ if (start >= end)
+ return 1; /* we're done */
+ }
+ return 0;
+}
+
+/*
+ * Find a free area in a specific range.
+ */
+unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size)
+{
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long addr = ei->addr, last;
+ if (ei->type != E820_RAM)
+ continue;
+ if (addr < start)
+ addr = start;
+ if (addr > ei->addr + ei->size)
+ continue;
+ while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
+ ;
+ last = addr + size;
+ if (last > ei->addr + ei->size)
+ continue;
+ if (last > end)
+ continue;
+ return addr;
+ }
+ return -1UL;
+}
+
+/*
+ * Free bootmem based on the e820 table for a node.
+ */
+void __init e820_bootmem_free(unsigned long start, unsigned long end)
+{
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long last, addr;
+
+ if (ei->type != E820_RAM ||
+ ei->addr+ei->size <= start ||
+ ei->addr >= end)
+ continue;
+
+ addr = round_up(ei->addr, PAGE_SIZE);
+ if (addr < start)
+ addr = start;
+
+ last = round_down(ei->addr + ei->size, PAGE_SIZE);
+ if (last >= end)
+ last = end;
+
+ if (last > addr && last-addr >= PAGE_SIZE)
+ free_bootmem(addr, last-addr);
+ }
+}
+
+/*
+ * Find the highest page frame number we have available
+ */
+unsigned long __init e820_end_of_ram(void)
+{
+ int i;
+ unsigned long end_pfn = 0;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long start, end;
+
+ start = round_up(ei->addr, PAGE_SIZE);
+ end = round_down(ei->addr + ei->size, PAGE_SIZE);
+ if (start >= end)
+ continue;
+ if (ei->type == E820_RAM) {
+ if (end > end_pfn<<PAGE_SHIFT)
+ end_pfn = end>>PAGE_SHIFT;
+ } else {
+ if (end > end_pfn_map<<PAGE_SHIFT)
+ end_pfn_map = end>>PAGE_SHIFT;
+ }
+ }
+
+ if (end_pfn > end_pfn_map)
+ end_pfn_map = end_pfn;
+ if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
+ end_pfn_map = MAXMEM>>PAGE_SHIFT;
+ if (end_pfn > end_user_pfn)
+ end_pfn = end_user_pfn;
+ if (end_pfn > end_pfn_map)
+ end_pfn = end_pfn_map;
+
+ return end_pfn;
+}
+
+/*
+ * Compute how much memory is missing in a range.
+ * Unlike the other functions in this file the arguments are in page numbers.
+ */
+unsigned long __init
+e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
+{
+ unsigned long ram = 0;
+ unsigned long start = start_pfn << PAGE_SHIFT;
+ unsigned long end = end_pfn << PAGE_SHIFT;
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long last, addr;
+
+ if (ei->type != E820_RAM ||
+ ei->addr+ei->size <= start ||
+ ei->addr >= end)
+ continue;
+
+ addr = round_up(ei->addr, PAGE_SIZE);
+ if (addr < start)
+ addr = start;
+
+ last = round_down(ei->addr + ei->size, PAGE_SIZE);
+ if (last >= end)
+ last = end;
+
+ if (last > addr)
+ ram += last - addr;
+ }
+ return ((end - start) - ram) >> PAGE_SHIFT;
+}
+
+/*
+ * Mark e820 reserved areas as busy for the resource manager.
+ */
+void __init e820_reserve_resources(void)
+{
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct resource *res;
+ res = alloc_bootmem(sizeof(struct resource));
+ switch (e820.map[i].type) {
+ case E820_RAM: res->name = "System RAM"; break;
+ case E820_ACPI: res->name = "ACPI Tables"; break;
+ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
+ default: res->name = "reserved";
+ }
+ res->start = e820.map[i].addr;
+ res->end = res->start + e820.map[i].size - 1;
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ request_resource(&iomem_resource, res);
+ if (e820.map[i].type == E820_RAM) {
+ /*
+ * We don't know which RAM region contains kernel data,
+ * so we try it repeatedly and let the resource manager
+ * test it.
+ */
+ request_resource(res, &code_resource);
+ request_resource(res, &data_resource);
+#ifdef CONFIG_KEXEC
+ request_resource(res, &crashk_res);
+#endif
+ }
+ }
+}
+
+/*
+ * Add a memory region to the kernel e820 map.
+ */
+void __init add_memory_region(unsigned long start, unsigned long size, int type)
+{
+ int x = e820.nr_map;
+
+ if (x == E820MAX) {
+ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+ return;
+ }
+
+ e820.map[x].addr = start;
+ e820.map[x].size = size;
+ e820.map[x].type = type;
+ e820.nr_map++;
+}
+
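+/*
+ * Prints the e820 map to the console. Illustrative output (addresses and
+ * region types vary from machine to machine):
+ *
+ *   BIOS-e820: 0000000000000000 - 000000000009fc00 (usable)
+ *   BIOS-e820: 00000000000f0000 - 0000000000100000 (reserved)
+ *   BIOS-e820: 0000000000100000 - 000000007fff0000 (usable)
+ */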
+void __init e820_print_map(char *who)
+{
+ int i;
+ char type[16];
+
+ for (i = 0; i < e820.nr_map; i++) {
+ switch (e820.map[i].type) {
+ case E820_RAM: sprintf(type, "(usable)\n");
+ break;
+ case E820_RESERVED:
+ sprintf(type, "(reserved)\n");
+ break;
+ case E820_ACPI:
+ sprintf(type, "(ACPI data)\n");
+ break;
+ case E820_NVS:
+ sprintf(type, "(ACPI NVS)\n");
+ break;
+ default: sprintf(type, "type %u\n", e820.map[i].type);
+ break;
+ }
+
+ printk(KERN_DEBUG
+ " %s: %016Lx - %016Lx %s", who,
+ (unsigned long long) e820.map[i].addr,
+ (unsigned long long) (e820.map[i].addr + e820.map[i].size),
+ type);
+ }
+}
+
+/*
+ * Sanitize the BIOS e820 map.
+ *
+ * Some e820 responses include overlapping entries. The following
+ * replaces the original e820 map with a new one, removing overlaps.
+ *
+ */
+static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
+{
+ struct change_member {
+ struct e820entry *pbios; /* pointer to original bios entry */
+ unsigned long long addr; /* address for this change point */
+ };
+ static struct change_member change_point_list[2*E820MAX] __initdata;
+ static struct change_member *change_point[2*E820MAX] __initdata;
+ static struct e820entry *overlap_list[E820MAX] __initdata;
+ static struct e820entry new_bios[E820MAX] __initdata;
+ struct change_member *change_tmp;
+ unsigned long current_type, last_type;
+ unsigned long long last_addr;
+ int chgidx, still_changing;
+ int overlap_entries;
+ int new_bios_entry;
+ int old_nr, new_nr, chg_nr;
+ int i;
+
+ /*
+ Visually we're performing the following (1,2,3,4 = memory types)...
+
+ Sample memory map (w/overlaps):
+ ____22__________________
+ ______________________4_
+ ____1111________________
+ _44_____________________
+ 11111111________________
+ ____________________33__
+ ___________44___________
+ __________33333_________
+ ______________22________
+ ___________________2222_
+ _________111111111______
+ _____________________11_
+ _________________4______
+
+ Sanitized equivalent (no overlap):
+ 1_______________________
+ _44_____________________
+ ___1____________________
+ ____22__________________
+ ______11________________
+ _________1______________
+ __________3_____________
+ ___________44___________
+ _____________33_________
+ _______________2________
+ ________________1_______
+ _________________4______
+ ___________________2____
+ ____________________33__
+ ______________________4_
+ */
+
+ /* if there's only one memory region, don't bother */
+ if (*pnr_map < 2)
+ return -1;
+
+ old_nr = *pnr_map;
+
+ /* bail out if we find any unreasonable addresses in bios map */
+ for (i=0; i<old_nr; i++)
+ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
+ return -1;
+
+ /* create pointers for initial change-point information (for sorting) */
+ for (i=0; i < 2*old_nr; i++)
+ change_point[i] = &change_point_list[i];
+
+ /* record all known change-points (starting and ending addresses),
+ omitting those that are for empty memory regions */
+ chgidx = 0;
+ for (i=0; i < old_nr; i++) {
+ if (biosmap[i].size != 0) {
+ change_point[chgidx]->addr = biosmap[i].addr;
+ change_point[chgidx++]->pbios = &biosmap[i];
+ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
+ change_point[chgidx++]->pbios = &biosmap[i];
+ }
+ }
+ chg_nr = chgidx;
+
+ /* sort change-point list by memory addresses (low -> high) */
+ still_changing = 1;
+ while (still_changing) {
+ still_changing = 0;
+ for (i=1; i < chg_nr; i++) {
+ /* if <current_addr> > <last_addr>, swap */
+ /* or, if current=<start_addr> & last=<end_addr>, swap */
+ if ((change_point[i]->addr < change_point[i-1]->addr) ||
+ ((change_point[i]->addr == change_point[i-1]->addr) &&
+ (change_point[i]->addr == change_point[i]->pbios->addr) &&
+ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
+ )
+ {
+ change_tmp = change_point[i];
+ change_point[i] = change_point[i-1];
+ change_point[i-1] = change_tmp;
+ still_changing=1;
+ }
+ }
+ }
+
+ /* create a new bios memory map, removing overlaps */
+ overlap_entries=0; /* number of entries in the overlap table */
+ new_bios_entry=0; /* index for creating new bios map entries */
+ last_type = 0; /* start with undefined memory type */
+ last_addr = 0; /* start with 0 as last starting address */
+ /* loop through change-points, determining their effect on the new bios map */
+ for (chgidx=0; chgidx < chg_nr; chgidx++)
+ {
+ /* keep track of all overlapping bios entries */
+ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
+ {
+ /* add map entry to overlap list (> 1 entry implies an overlap) */
+ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
+ }
+ else
+ {
+ /* remove entry from list (order independent, so swap with last) */
+ for (i=0; i<overlap_entries; i++)
+ {
+ if (overlap_list[i] == change_point[chgidx]->pbios)
+ overlap_list[i] = overlap_list[overlap_entries-1];
+ }
+ overlap_entries--;
+ }
+ /* if there are overlapping entries, decide which "type" to use */
+ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
+ current_type = 0;
+ for (i=0; i<overlap_entries; i++)
+ if (overlap_list[i]->type > current_type)
+ current_type = overlap_list[i]->type;
+ /* continue building up new bios map based on this information */
+ if (current_type != last_type) {
+ if (last_type != 0) {
+ new_bios[new_bios_entry].size =
+ change_point[chgidx]->addr - last_addr;
+ /* move forward only if the new size was non-zero */
+ if (new_bios[new_bios_entry].size != 0)
+ if (++new_bios_entry >= E820MAX)
+ break; /* no more space left for new bios entries */
+ }
+ if (current_type != 0) {
+ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
+ new_bios[new_bios_entry].type = current_type;
+ last_addr=change_point[chgidx]->addr;
+ }
+ last_type = current_type;
+ }
+ }
+ new_nr = new_bios_entry; /* retain count for new bios entries */
+
+ /* copy new bios mapping into original location */
+ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
+ *pnr_map = new_nr;
+
+ return 0;
+}
+
+/*
+ * Copy the BIOS e820 map into a safe place.
+ *
+ * Sanity-check it while we're at it..
+ *
+ * If we're lucky and live on a modern system, the setup code
+ * will have given us a memory map that we can use to properly
+ * set up memory. If we aren't, we'll fake a memory map.
+ *
+ * We check to see that the memory map contains at least 2 elements
+ * before we'll use it, because the detection code in setup.S may
+ * not be perfect and almost every PC known to man has two memory
+ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
+ * thinkpad 560x, for example, does not cooperate with the memory
+ * detection code.)
+ */
+static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+{
+ /* Only one memory region (or negative)? Ignore it */
+ if (nr_map < 2)
+ return -1;
+
+ do {
+ unsigned long start = biosmap->addr;
+ unsigned long size = biosmap->size;
+ unsigned long end = start + size;
+ unsigned long type = biosmap->type;
+
+ /* Overflow in 64 bits? Ignore the memory map. */
+ if (start > end)
+ return -1;
+
+ /*
+ * Some BIOSes claim RAM in the 640k - 1M region.
+ * Not right. Fix it up.
+ *
+ * This should be removed on Hammer which is supposed to not
+ * have non e820 covered ISA mappings there, but I had some strange
+ * problems so it stays for now. -AK
+ */
+ if (type == E820_RAM) {
+ if (start < 0x100000ULL && end > 0xA0000ULL) {
+ if (start < 0xA0000ULL)
+ add_memory_region(start, 0xA0000ULL-start, type);
+ if (end <= 0x100000ULL)
+ continue;
+ start = 0x100000ULL;
+ size = end - start;
+ }
+ }
+
+ add_memory_region(start, size, type);
+ } while (biosmap++,--nr_map);
+ return 0;
+}
+
+void __init setup_memory_region(void)
+{
+ char *who = "BIOS-e820";
+
+ /*
+ * Try to copy the BIOS-supplied E820-map.
+ *
+ * Otherwise fake a memory map; one section from 0k->640k,
+ * the next section from 1mb->appropriate_mem_k
+ */
+ sanitize_e820_map(E820_MAP, &E820_MAP_NR);
+ if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
+ unsigned long mem_size;
+
+ /* compare results from other methods and take the greater */
+ if (ALT_MEM_K < EXT_MEM_K) {
+ mem_size = EXT_MEM_K;
+ who = "BIOS-88";
+ } else {
+ mem_size = ALT_MEM_K;
+ who = "BIOS-e801";
+ }
+
+ e820.nr_map = 0;
+ add_memory_region(0, LOWMEMSIZE(), E820_RAM);
+ add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
+ }
+
+ printk(KERN_DEBUG "BIOS-provided physical RAM map:\n");
+ e820_print_map(who);
+
+ /* This also sets end_pfn_map */
+ end_pfn = e820_end_of_ram();
+}
+
+void __init parse_memopt(char *p, char **from)
+{
+ end_user_pfn = memparse(p, from);
+ end_user_pfn >>= PAGE_SHIFT;
+}
+
+void __init parse_memmapopt(char *p, char **from)
+{
+ unsigned long long start_at, mem_size;
+
+ mem_size = memparse(p, from);
+ p = *from;
+ if (*p == '@') {
+ start_at = memparse(p+1, from);
+ add_memory_region(start_at, mem_size, E820_RAM);
+ } else if (*p == '#') {
+ start_at = memparse(p+1, from);
+ add_memory_region(start_at, mem_size, E820_ACPI);
+ } else if (*p == '$') {
+ start_at = memparse(p+1, from);
+ add_memory_region(start_at, mem_size, E820_RESERVED);
+ } else {
+ end_user_pfn = (mem_size >> PAGE_SHIFT);
+ }
+ p = *from;
+}
+
+unsigned long pci_mem_start = 0xaeedbabe;
+
+/*
+ * Search for the biggest gap in the low 32 bits of the e820
+ * memory space. We pass this space to PCI to assign MMIO resources
+ * for hotplug or unconfigured devices.
+ * Hopefully the BIOS left enough space free.
+ */
+__init void e820_setup_gap(void)
+{
+ unsigned long gapstart, gapsize, round;
+ unsigned long last;
+ int i;
+ int found = 0;
+
+ last = 0x100000000ull;
+ gapstart = 0x10000000;
+ gapsize = 0x400000;
+ i = e820.nr_map;
+ while (--i >= 0) {
+ unsigned long long start = e820.map[i].addr;
+ unsigned long long end = start + e820.map[i].size;
+
+ /*
+ * Since "last" is at most 4GB, we know we'll
+ * fit in 32 bits if this condition is true
+ */
+ if (last > end) {
+ unsigned long gap = last - end;
+
+ if (gap > gapsize) {
+ gapsize = gap;
+ gapstart = end;
+ found = 1;
+ }
+ }
+ if (start < last)
+ last = start;
+ }
+
+ if (!found) {
+ gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024;
+ printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
+ KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
+ }
+
+ /*
+ * See how much we want to round up: start off with
+ * rounding to the next 1MB area.
+ */
+ round = 0x100000;
+ while ((gapsize >> 4) > round)
+ round += round;
+ /* Fun with two's complement */
+ pci_mem_start = (gapstart + round) & -round;
+
+ printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
+ pci_mem_start, gapstart, gapsize);
+}
--- /dev/null
+#include <lwk/linkage.h>
+#include <lwk/errno.h>
+#include <arch/ptrace.h>
+#include <arch/asm-offsets.h>
+#include <arch/idt_vectors.h>
+#include <arch/dwarf2.h>
+#include <arch/task.h>
+
+
+/**
+ * This performs the architecture-specific portion of a context switch.
+ * Normally, this is called in the context of prev and returns in the
+ * context of next. However, new tasks are handled differently. Since
+ * new tasks do not yet have a kernel context (rather, their kernel
+ * stack just has the pt_regs to use for the new task), execution returns
+ * directly to the new task, rather than context_switch().
+ *
+ * Input Registers:
+ * RDI = prev
+ * RSI = next
+ *
+ * Output Registers:
+ * RAX = prev (same value as on entry)
+ *
+ * C Prototype:
+ * struct task_struct *arch_context_switch(struct task_struct *prev,
+ * struct task_struct *next);
+ * arch_context_switch() returns prev
+ *
+ * NOTE: External interrupts are disabled on entry.
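+ *
+ * Illustrative call site (a sketch; the real caller is the scheduler's
+ * context_switch() path):
+ *
+ *     prev = arch_context_switch(prev, next);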
+ */
+ENTRY(arch_context_switch)
+ /* Save prev's callee saved registers (others saved by caller) */
+ pushf
+ pushq %rbp
+ pushq %rbx
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+
+ /* Switch to next's stack */
+ movq %rsp, tsk_arch_rsp(%rdi)
+ movq tsk_arch_rsp(%rsi), %rsp
+
+ /* Call C code to do more stuff (save/restore FPU, update PDA, ...) */
+ call __arch_context_switch
+ /* returns with %rax set to prev */
+ movq %rax, %rdi
+ movq %gs:pda_pcurrent, %rsi
+
+ /* New tasks need to be kick-started */
+ lock btr $_TF_NEW_TASK_BIT, tsk_arch_flags(%rsi)
+ jc kickstart_new_task
+
+ /* Restore next's callee saved registers */
+ popq %r15
+ popq %r14
+ popq %r13
+ popq %r12
+ popq %rbx
+ popq %rbp
+ popf;
+
+ /* Return to context_switch(), with new task active */
+ retq
+
+kickstart_new_task:
+ call schedule_new_task_tail /* Finish up schedule(), drop locks, etc. */
+ testl $3, CS(%rsp) /* Sets ZF=1 if returning to kernel-space */
+ je 1f /* If ZF=1, leave kernel PDA in place */
+ swapgs /* Install the user PDA */
+ movq $0, %rax /* Zero all of the segment registers */
+ movl %eax, %ds
+ movl %eax, %es
+ movl %eax, %fs
+ movl %eax, %gs
+1:
+ movq (%rsp), %r15 /* Unpack the pt_regs struct that */
+ movq 1*8(%rsp), %r14 /* __arch_context_switch() put at the top */
+ movq 2*8(%rsp), %r13 /* of the new task's kernel stack. */
+ movq 3*8(%rsp), %r12
+ movq 4*8(%rsp), %rbp
+ movq 5*8(%rsp), %rbx
+ movq 6*8(%rsp), %r11
+ movq 7*8(%rsp), %r10
+ movq 8*8(%rsp), %r9
+ movq 9*8(%rsp), %r8
+ movq 10*8(%rsp), %rax
+ movq 11*8(%rsp), %rcx
+ movq 12*8(%rsp), %rdx
+ movq 13*8(%rsp), %rsi
+ movq 14*8(%rsp), %rdi
+ add $128, %rsp /* Bump to point to RIP slot in pt_regs */
+ iretq /* Start the new task running */
+
+END(arch_context_switch)
+
+
+/**
+ * This is the entry point for system calls. Upon entry we are still running
+ * with the user-level stack and the x86_64 CPU control unit has stashed the
+ * user-level RIP in RCX and RFLAGS in R11. External interrupts are disabled.
+ *
+ * The first thing this function does is generate a partial stack frame
+ * containing all caller-saved registers. After this is done, the system call
+ * number (stored in RAX by user-level) is used to index into the system call
+ * table (sys_call_table) and call the handler function. The handler function
+ * is responsible for saving all callee-saved registers... if it is a C
+ * function, callee-saved registers are saved automatically by the compiler.
+ *
+ * Immediately before calling the handler function, the kernel stack looks
+ * like:
+ *
+ * RIP = user-space RIP
+ * ORIG_RAX = system call number, passed from user-space
+ * RDI = ARG0, passed from user-space
+ * RSI = ARG1, passed from user-space
+ * RDX = ARG2, passed from user-space
+ * (junk) = normally RCX, but RCX is clobbered by SYSCALL
+ * RAX = system call number, passed from user-space
+ * R8 = ARG4, passed from user-space
+ * R9 = ARG5, passed from user-space
+ * R10 = ARG3, passed from user-space
+ * RSP -> R11 = user-space RFLAGS
+ *
+ * And the registers are setup as follows:
+ *
+ * RDI = ARG0
+ * RSI = ARG1
+ * RDX = ARG2
+ * RCX = ARG3 (was stored on R10 on entry)
+ * R8 = ARG4
+ * R9 = ARG5
+ * RAX = System call number
+ *
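+ * Illustrative user-space calling sequence (not part of this file; it
+ * assumes a __NR_getpid system call number is defined):
+ *
+ *     movq $__NR_getpid, %rax    # system call number (no arguments)
+ *     syscall                    # RCX <- RIP, R11 <- RFLAGS, enter kernel
+ *     # on return, RAX holds the result; RCX and R11 are clobbered
+ *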
+ * NOTE: RCX and R11 are clobbered by system calls. This is due to the SYSCALL
+ * instruction using RCX and R11 to store RIP and RFLAGS before
+ * transfering control to the kernel. User-level will observe different
+ * transferring control to the kernel. User-level will observe different
+ *
+ * NOTE: External interrupts are disabled on entry.
+ */
+ENTRY(asm_syscall)
+ /*
+ * Enter from user-space
+ */
+ swapgs /* Load GS.base with kernel PDA addr */
+ movq %rsp, %gs:pda_oldrsp /* Backup user-space RSP */
+ movq %gs:pda_kernelstack, %rsp /* Load kernel stack */
+
+ /*
+ * Save registers to kernel-stack
+ */
+ subq $10*8, %rsp /* Make room on the stack */
+ movq %rcx, 10*8(%rsp) /* Save user-space RIP */
+ movq %rax, 9*8(%rsp) /* Save syscall # in ORIG_RAX slot */
+ movq %rdi, 8*8(%rsp) /* Save user-space RDI (ARG0) */
+ movq %rsi, 7*8(%rsp) /* Save user-space RSI (ARG1) */
+ movq %rdx, 6*8(%rsp) /* Save user-space RDX (ARG2) */
+ movq %rcx, 5*8(%rsp) /* RCX is clobbered, save anyways */
+ movq %rax, 4*8(%rsp) /* Save user-space RAX (syscall #) */
+ movq %r8, 3*8(%rsp) /* Save user-space R8 (ARG4) */
+ movq %r9, 2*8(%rsp) /* Save user-space R9 (ARG5) */
+ movq %r10, 1*8(%rsp) /* Save user-space R10 (ARG3) */
+ movq %r11, (%rsp) /* Save user-space RFLAGS */
+ sti /* Enable external interrupts */
+
+ /*
+ * Call the system call handler
+ */
+ movq %r10, %rcx /* Per x86_64 C ABI, RCX holds ARG3 */
+ cmp $__NR_syscall_max, %rax /* Make sure syscall # is in range */
+ jg 1f
+ call *sys_call_table(,%rax,8) /* Call the system call handler */
+ jmp 2f
+1:
+ call syscall_not_implemented /* Print error and return */
+2:
+ movq %rax, 4*8(%rsp) /* Save return code in stack frame */
+
+ /* Reschedule, since we're returning to user space */
+ call schedule
+
+ /*
+ * Return to user-space
+ */
+ cli /* Disable external interrupts */
+ movq (%rsp), %r11 /* Restore RFLAGS for SYSRET */
+ movq 1*8(%rsp), %r10 /* Restore user-space R10 (ARG3) */
+ movq 2*8(%rsp), %r9 /* Restore user-space R9 (ARG5) */
+ movq 3*8(%rsp), %r8 /* Restore user-space R8 (ARG4) */
+ movq 4*8(%rsp), %rax /* Return syscall return code */
+ movq 6*8(%rsp), %rdx /* Restore user-space RDX (ARG2) */
+ movq 7*8(%rsp), %rsi /* Restore user-space RSI (ARG1) */
+ movq 8*8(%rsp), %rdi /* Restore user-space RDI (ARG0) */
+ movq 10*8(%rsp), %rcx /* Restore RIP for SYSRET */
+ movq %gs:pda_oldrsp, %rsp /* Restore user-space RSP */
+ swapgs /* Restore user-space GS.base */
+ sysretq /* Return to user-space */
+END(asm_syscall)
+
+
+/**
+ * This is a handler for SYSCALL instructions issued from compatibility mode...
+ * we don't support them.
+ */
+ENTRY(asm_syscall_ignore)
+ mov $-ENOSYS,%eax
+ sysret
+END(asm_syscall_ignore)
+
+
+/**
+ * This is the common entry point for all interrupts.
+ *
+ * Before calling the C handler function, the kernel stack looks like:
+ *
+ * [...]
+ * SS (stack segment selector)
+ * RSP (stack pointer)
+ * RFLAGS (flags register)
+ * CS (code segment selector)
+ * RIP (instruction pointer)
+ * ERROR_CODE (0 for interrupts with no error code)
+ * RDI (this was the vector # on entry, we move to %rsi/ARG1)
+ * RSI
+ * RDX
+ * RCX
+ * RAX
+ * R8
+ * R9
+ * R10
+ * R11
+ * RBX
+ * RBP
+ * R12
+ * R13
+ * R14
+ * RSP -> R15
+ *
+ * And the registers are setup as follows:
+ *
+ * RDI = ARG0: A fully populated 'struct pt_regs *'
+ * RSI = ARG1: The interrupt vector number
+ *
+ * NOTE: External interrupts are disabled on entry.
+ */
+ENTRY(asm_interrupt)
+ cld /* Clear direction flag */
+
+ /*
+ * Save registers to kernel-stack
+ */
+ subq $14*8, %rsp /* Make room on the stack */
+ movq %rsi, 13*8(%rsp)
+ movq 14*8(%rsp), %rsi /* ARG1: the interrupt vector number */
+ movq %rdi, 14*8(%rsp)
+ movq %rdx, 12*8(%rsp)
+ movq %rcx, 11*8(%rsp)
+ movq %rax, 10*8(%rsp)
+ movq %r8, 9*8(%rsp)
+ movq %r9, 8*8(%rsp)
+ movq %r10, 7*8(%rsp)
+ movq %r11, 6*8(%rsp)
+ movq %rbx, 5*8(%rsp)
+ movq %rbp, 4*8(%rsp)
+ movq %r12, 3*8(%rsp)
+ movq %r13, 2*8(%rsp)
+ movq %r14, 1*8(%rsp)
+ movq %r15, (%rsp)
+
+ /*
+ * Load kernel GS if we're coming from user-space
+ */
+ testl $3, CS(%rsp) /* Sets ZF=1 if coming from kspace */
+ je 1f /* If ZF=1, skip installing the PDA */
+ swapgs /* Install the PDA */
+1:
+ /*
+ * Call C code interrupt handler entry point
+ */
+ movq %rsp, %rdi /* ARG0: pointer to 'struct pt_regs' */
+ sti /* Enable external interrupts */
+ call do_interrupt /* Call common C handler */
+ cli /* Disable external interrupts */
+
+ /*
+ * If returning to user-space, reschedule and restore user-space GS
+ */
+ testl $3, CS(%rsp) /* Sets ZF=1 if returning to kspace */
+ je 2f /* If ZF=1, jump forward to 2: below */
+ sti /* Enable external interrupts */
+ call schedule /* Reschedule */
+ cli /* Disable external interrupts */
+ swapgs /* Restore uspace GS register */
+2:
+ /*
+ * Restore registers and return to interrupted program
+ */
+ movq (%rsp), %r15
+ movq 1*8(%rsp), %r14
+ movq 2*8(%rsp), %r13
+ movq 3*8(%rsp), %r12
+ movq 4*8(%rsp), %rbp
+ movq 5*8(%rsp), %rbx
+ movq 6*8(%rsp), %r11
+ movq 7*8(%rsp), %r10
+ movq 8*8(%rsp), %r9
+ movq 9*8(%rsp), %r8
+ movq 10*8(%rsp), %rax
+ movq 11*8(%rsp), %rcx
+ movq 12*8(%rsp), %rdx
+ movq 13*8(%rsp), %rsi
+ movq 14*8(%rsp), %rdi
+ addq $16*8, %rsp
+ iretq
+END(asm_interrupt)
+
+
+/**
+ * This table contains an initial entry point function for each IDT vector.
+ * When an interrupt vector fires, the first instruction executed is at
+ * table[vector].
+ *
+ * This table scheme is necessary because some x86_64 interrupts push an
+ * error code onto the stack and others do not. Additionally, there is no way
+ * for an interrupt handler to determine the interrupt vector that triggered
+ * it. Therefore, the functions in this table push a dummy error code onto
+ * the stack when necessary, always push the vector number, and then call a
+ * common handler (asm_interrupt).
+ *
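+ * For example (illustrative), vector 14 (page fault) already pushes an
+ * error code, so its entry is just "pushq $14; jmp asm_interrupt", while
+ * vector 32 gets a dummy "pushq $0" first. The actual entries are
+ * generated by the .rept loop below.
+ *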
+ * WARNING: Each function/entry in this table must be <= 16 bytes.
+ * Be very careful when adding instructions.
+ */
+.align 16
+ENTRY(asm_idtvec_table)
+ vector=0
+ .rept NUM_IDT_ENTRIES
+ .if vector<=7||vector==9||vector==15||vector==16||vector>=18
+ pushq $0 /* push dummy error_code */
+ .endif
+ pushq $vector /* push vector # into RDI slot */
+ jmp asm_interrupt /* call common handler */
+
+ /* Move onto next entry in table*/
+ .align 16
+ vector=vector+1
+ .endr
+END(asm_idtvec_table)
+
+
+/**
+ * Reload gs selector with exception handling.
+ * edi: new selector
+ */
+ENTRY(load_gs_index)
+ CFI_STARTPROC
+ pushf
+ CFI_ADJUST_CFA_OFFSET 8
+ cli
+ swapgs
+gs_change:
+ movl %edi,%gs
+2: mfence /* workaround */
+ swapgs
+ popf
+ CFI_ADJUST_CFA_OFFSET -8
+ ret
+ CFI_ENDPROC
+ENDPROC(load_gs_index)
+
+ .section __ex_table,"a"
+ .align 8
+ .quad gs_change,bad_gs
+ .previous
+ .section .fixup,"ax"
+ /* running with kernelgs */
+bad_gs:
+ swapgs /* switch back to user gs */
+ xorl %eax,%eax
+ movl %eax,%gs
+ jmp 2b
+ .previous
+
+
--- /dev/null
+/*
+ * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
+ *
+ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
+ * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
+ * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
+ */
+
+
+#include <lwk/linkage.h>
+#include <lwk/init.h>
+#include <lwk/cpu.h>
+#include <arch/desc.h>
+#include <arch/segment.h>
+#include <arch/pgtable.h>
+#include <arch/page.h>
+#include <arch/msr.h>
+#include <arch/cache.h>
+
+/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
+ * because we need identity-mapped pages.
+ *
+ */
+
+ .text
+ .section .bootstrap.text
+
+#ifdef CONFIG_CRAY_XT
+ .code32
+ .globl startup_32
+startup_32:
+ cld
+ cli
+ movl $(__KERNEL_DS), %eax
+ movl %eax, %ds
+ lgdt gdt32_descr - __START_KERNEL_map
+
+ /* Enable PAE mode and PGE */
+ xorl %eax, %eax
+ btsl $5, %eax /* enable PAE */
+ btsl $7, %eax /* enable PGE */
+ movl %eax, %cr4
+
+ /* Setup early boot page tables */
+ movl $(init_level4_pgt - __START_KERNEL_map), %eax
+ movl %eax, %cr3
+
+ /* Enable Long Mode */
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_LME, %eax
+ wrmsr
+ xorl %eax, %eax
+ btsl $31, %eax /* enable paging */
+ btsl $0, %eax /* enable protected mode */
+ movl %eax, %cr0
+
+ /* coldstart uses a hard-coded address for real_mode_data */
+ movl $0x90000, %esi
+
+ /*
+ * At this point we're in long mode but 32-bit compatibility mode.
+ * This jump transitions us into true 64-bit mode.
+ */
+ ljmp $__KERNEL_CS, $(startup_64 - __START_KERNEL_map)
+#endif /* CONFIG_CRAY_XT */
+
+ .code64
+ .globl startup_64
+startup_64:
+
+ /*
+ * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
+ * and someone has loaded an identity mapped page table
+ * for us. These identity mapped page tables map all of the
+ * kernel pages and possibly all of memory.
+ *
+ * %esi holds a physical pointer to real_mode_data.
+ *
+ * We come here either directly from a 64bit bootloader, or from
+ * arch/x86_64/boot/compressed/head.S.
+ *
+ * We only come here initially at boot; nothing else comes here.
+ *
+ * Since we may be loaded at an address different from what we were
+ * compiled to run at we first fixup the physical addresses in our page
+ * tables and then reload them.
+ */
+
+ /* Compute the delta between the address I am compiled to run at and the
+ * address I am actually running at.
+ */
+ leaq _text(%rip), %rbp
+ subq $_text - __START_KERNEL_map, %rbp
+
+ /* Is the address not 2M aligned? */
+ movq %rbp, %rax
+ andl $~LARGE_PAGE_MASK, %eax
+ testl %eax, %eax
+ jnz bad_address
+
+ /* Is the address too large? */
+ leaq _text(%rip), %rdx
+ movq $PGDIR_SIZE, %rax
+ cmpq %rax, %rdx
+ jae bad_address
+
+ /* Fixup the physical addresses in the page table
+ */
+ addq %rbp, init_level4_pgt + 0(%rip)
+ addq %rbp, init_level4_pgt + (258*8)(%rip)
+ addq %rbp, init_level4_pgt + (511*8)(%rip)
+
+ addq %rbp, level3_ident_pgt + 0(%rip)
+ addq %rbp, level3_kernel_pgt + (510*8)(%rip)
+
+ /* Add an Identity mapping if I am above 1G */
+ leaq _text(%rip), %rdi
+ andq $LARGE_PAGE_MASK, %rdi
+
+ movq %rdi, %rax
+ shrq $PUD_SHIFT, %rax
+ andq $(PTRS_PER_PUD - 1), %rax
+ jz ident_complete
+
+ leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
+ leaq level3_ident_pgt(%rip), %rbx
+ movq %rdx, 0(%rbx, %rax, 8)
+
+ movq %rdi, %rax
+ shrq $PMD_SHIFT, %rax
+ andq $(PTRS_PER_PMD - 1), %rax
+ leaq __PAGE_KERNEL_LARGE_EXEC(%rdi), %rdx
+ leaq level2_spare_pgt(%rip), %rbx
+ movq %rdx, 0(%rbx, %rax, 8)
+ident_complete:
+
+ /* Fixup the kernel text+data virtual addresses
+ */
+ leaq level2_kernel_pgt(%rip), %rdi
+ leaq 4096(%rdi), %r8
+ /* See if it is a valid page table entry */
+1: testq $1, 0(%rdi)
+ jz 2f
+ addq %rbp, 0(%rdi)
+ /* Go to the next page */
+2: addq $8, %rdi
+ cmp %r8, %rdi
+ jne 1b
+
+ /* Fixup phys_base */
+ addq %rbp, phys_base(%rip)
+
+ addq %rbp, trampoline_level4_pgt + 0(%rip)
+ addq %rbp, trampoline_level4_pgt + (511*8)(%rip)
+#ifdef CONFIG_ACPI_SLEEP
+ addq %rbp, wakeup_level4_pgt + 0(%rip)
+ addq %rbp, wakeup_level4_pgt + (511*8)(%rip)
+#endif
+
+ /* Due to ENTRY(), sometimes the empty space gets filled with
+ * zeros. Better to take a jmp than to rely on the empty space being
+ * filled with 0x90 (nop).
+ */
+ jmp secondary_startup_64
+ENTRY(secondary_startup_64)
+ /*
+ * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
+ * and someone has loaded a mapped page table.
+ *
+ * %esi holds a physical pointer to real_mode_data.
+ *
+ * We come here either from startup_64 (using physical addresses)
+ * or from trampoline.S (using virtual addresses).
+ *
+ * Using virtual addresses from trampoline.S removes the need
+ * to have any identity mapped pages in the kernel page table
+ * after the boot processor executes this code.
+ */
+
+ /* Enable PAE mode and PGE */
+ xorq %rax, %rax
+ btsq $5, %rax
+ btsq $7, %rax
+ movq %rax, %cr4
+
+ /* Setup early boot stage 4 level pagetables. */
+ movq $(init_level4_pgt - __START_KERNEL_map), %rax
+ addq phys_base(%rip), %rax
+ movq %rax, %cr3
+
+ /* Ensure I am executing from virtual addresses */
+ movq $1f, %rax
+ jmp *%rax
+1:
+
+ /* Check if nx is implemented */
+ movl $0x80000001, %eax
+ cpuid
+ movl %edx,%edi
+
+ /* Setup EFER (Extended Feature Enable Register) */
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_SCE, %eax /* Enable System Call */
+ btl $20,%edi /* No Execute supported? */
+ jnc 1f
+ btsl $_EFER_NX, %eax
+1: wrmsr /* Make changes effective */
+
+ /* Setup cr0 */
+#define CR0_PM 1 /* protected mode */
+#define CR0_MP (1<<1)
+#define CR0_ET (1<<4)
+#define CR0_NE (1<<5)
+#define CR0_WP (1<<16)
+#define CR0_AM (1<<18)
+#define CR0_PAGING (1<<31)
+ movl $CR0_PM|CR0_MP|CR0_ET|CR0_NE|CR0_WP|CR0_AM|CR0_PAGING,%eax
+ /* Make changes effective */
+ movq %rax, %cr0
+
+ /* Setup a boot time stack */
+ movq init_rsp(%rip),%rsp
+
+ /* zero EFLAGS after setting rsp */
+ pushq $0
+ popfq
+
+ /*
+ * We must switch to a new descriptor in kernel space for the GDT
+	 * because soon the kernel won't have access anymore to the userspace
+	 * addresses we are currently running at. We have to do it here
+	 * because in 32bit mode we couldn't load a 64bit linear address.
+ */
+ lgdt cpu_gdt_descr(%rip)
+
+ /* set up data segments. actually 0 would do too */
+ movl $__KERNEL_DS,%eax
+ movl %eax,%ds
+ movl %eax,%ss
+ movl %eax,%es
+
+ /*
+ * We don't really need to load %fs or %gs, but load them anyway
+ * to kill any stale realmode selectors. This allows execution
+ * under VT hardware.
+ */
+ movl %eax,%fs
+ movl %eax,%gs
+
+ /*
+	 * Set up a dummy PDA. This is just for some early bootup code
+ * that does in_interrupt()
+ */
+ movl $MSR_GS_BASE,%ecx
+ movq $empty_zero_page,%rax
+ movq %rax,%rdx
+ shrq $32,%rdx
+ wrmsr
+
+	/* %esi is a pointer to the real mode structure with interesting info;
+	   pass it to C */
+ movl %esi, %edi
+
+	/* Finally jump to run C code and to be on the real kernel address.
+	 * Since we are running in identity-mapped space we have to jump
+	 * to the full 64bit address; this is only possible via an indirect
+	 * jump. In addition we need to ensure %cs is set, so we make this
+	 * a far return.
+ */
+ movq initial_code(%rip),%rax
+ pushq $0 # fake return address to stop unwinder
+ pushq $__KERNEL_CS # set correct cs
+ pushq %rax # target address in negative space
+ lretq
+
+ /* SMP bootup changes these two */
+ .align 8
+ .globl initial_code
+initial_code:
+ .quad x86_64_start_kernel
+ .globl init_rsp
+init_rsp:
+ .quad bootstrap_task_union+TASK_SIZE-8
+
+bad_address:
+ jmp bad_address
+
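+/* Minimal fault handler installed in every IDT slot during early boot.
+ * It prints a panic message (using early_recursion_flag to guard against
+ * recursive faults) and then halts.
+ */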
+ENTRY(early_idt_handler)
+ cmpl $2,early_recursion_flag(%rip)
+ jz 1f
+ incl early_recursion_flag(%rip)
+ xorl %eax,%eax
+ movq 8(%rsp),%rsi # get rip
+ movq (%rsp),%rdx
+ movq %cr2,%rcx
+ leaq early_idt_msg(%rip),%rdi
+ call printk
+ cmpl $2,early_recursion_flag(%rip)
+ jz 1f
+// call dump_stack
+1: hlt
+ jmp 1b
+early_recursion_flag:
+ .long 0
+
+early_idt_msg:
+ .asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n"
+early_idt_ripmsg:
+ .asciz "RIP %s\n"
+
+.balign PAGE_SIZE
+
+#define NEXT_PAGE(name) \
+ .balign PAGE_SIZE; \
+ENTRY(name)
+
+/* Automate the creation of 1 to 1 mapping pmd entries */
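+/* Each entry maps one 2 MB large page, hence the (i << 21) stride. */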
+#define PMDS(START, PERM, COUNT) \
+ i = 0 ; \
+ .rept (COUNT) ; \
+ .quad (START) + (i << 21) + (PERM) ; \
+ i = i + 1 ; \
+ .endr
+
+ /*
+ * This default setting generates an ident mapping at address 0x100000
+ * and a mapping for the kernel that precisely maps virtual address
+ * 0xffffffff80000000 to physical address 0x000000. (always using
+ * 2Mbyte large pages provided by PAE mode)
+ */
+NEXT_PAGE(init_level4_pgt)
+ .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .fill 257,8,0
+ .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .fill 252,8,0
+ /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
+ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+
+NEXT_PAGE(level3_ident_pgt)
+ .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .fill 511,8,0
+
+NEXT_PAGE(level3_kernel_pgt)
+ .fill 510,8,0
+ /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
+ .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
+#ifndef CONFIG_CRAY_XT
+ .fill 1,8,0
+#else
+ .quad level2_seastar_pgt - __START_KERNEL_map + _KERNPG_TABLE
+#endif
+
+NEXT_PAGE(level2_ident_pgt)
+ /* Since I easily can, map the first 1G.
+ * Don't set NX because code runs from these pages.
+ */
+ PMDS(0x0000000000000000, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)
+
+NEXT_PAGE(level2_kernel_pgt)
+ /* 40MB kernel mapping. The kernel code cannot be bigger than that.
+ When you change this change KERNEL_TEXT_SIZE in page.h too. */
+ /* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */
+ PMDS(0x0000000000000000, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL, KERNEL_TEXT_SIZE/PMD_SIZE)
+ /* Module mapping starts here */
+ .fill (PTRS_PER_PMD - (KERNEL_TEXT_SIZE/PMD_SIZE)),8,0
+
+#ifdef CONFIG_CRAY_XT
+NEXT_PAGE(level2_seastar_pgt)
+ .fill 511,8,0
+ .quad 0x00000000ffe00193
+#endif
+
+NEXT_PAGE(level2_spare_pgt)
+ .fill 512,8,0
+
+#undef PMDS
+#undef NEXT_PAGE
+
+ .data
+ .align 16
+ .globl cpu_gdt_descr
+cpu_gdt_descr:
+ .word gdt_end-cpu_gdt_table-1
+gdt:
+ .quad cpu_gdt_table
+ .rept NR_CPUS-1
+ .word 0
+ .quad 0
+ .endr
+
+#ifdef CONFIG_CRAY_XT
+ .align 16
+ .globl gdt32_descr
+gdt32_descr:
+ .word gdt_end-cpu_gdt_table-1
+ .long cpu_gdt_table-__START_KERNEL_map
+#endif
+
+ENTRY(phys_base)
+ /* This must match the first entry in level2_kernel_pgt */
+ .quad 0x0000000000000000
+
+/* We need valid kernel segments for data and code in long mode too;
+ * IRET will check the segment types (kkeil 2000/10/28).
+ * Also, sysret mandates a special GDT layout.
+ */
+
+ .section .data.page_aligned, "aw"
+ .align PAGE_SIZE
+
+/* The TLS descriptors are currently at a different place compared to i386.
+ Hopefully nobody expects them at a fixed place (Wine?) */
+
+ENTRY(cpu_gdt_table)
+ .quad 0x0000000000000000 /* NULL descriptor */
+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
+ .quad 0x00cffb000000ffff /* __USER32_CS */
+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
+ .quad 0x00affb000000ffff /* __USER_CS */
+ .quad 0x0 /* unused */
+ .quad 0,0 /* TSS */
+ .quad 0,0 /* LDT */
+ .quad 0,0,0 /* three TLS descriptors */
+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
+gdt_end:
+ /* asm/segment.h:GDT_ENTRIES must match this */
+ /* This should be a multiple of the cache line size */
+ /* GDTs of other CPUs are now dynamically allocated */
+
+ /* zero the remaining page */
+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
+
+ .section .bss, "aw", @nobits
+ .align L1_CACHE_BYTES
+ENTRY(idt_table)
+ .skip 256 * 16
+
+ .section .bss.page_aligned, "aw", @nobits
+ .align PAGE_SIZE
+ENTRY(empty_zero_page)
+ .skip PAGE_SIZE
--- /dev/null
+#include <lwk/init.h>
+#include <lwk/kernel.h>
+#include <lwk/string.h>
+#include <lwk/screen_info.h>
+#include <lwk/params.h>
+#include <lwk/smp.h>
+#include <lwk/cpuinfo.h>
+#include <arch/bootsetup.h>
+#include <arch/sections.h>
+#include <arch/pda.h>
+#include <arch/processor.h>
+#include <arch/desc.h>
+#include <arch/proto.h>
+#include <arch/page.h>
+#include <arch/pgtable.h>
+#include <arch/tlbflush.h>
+
+/**
+ * Data passed to the kernel by the bootloader.
+ *
+ * NOTE: This is marked as __initdata so it goes away after the
+ * kernel bootstrap process is complete.
+ */
+char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
+
+/**
+ * Interrupt Descriptor Table (IDT) descriptor.
+ *
+ * This descriptor contains the length of the IDT table and a
+ * pointer to the table. The lidt instruction (load IDT) requires
+ * this format.
+ */
+struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
+
+/**
+ * Array of pointers to each CPU's per-processor data area.
+ * The array is indexed by CPU ID.
+ */
+struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
+
+/**
+ * Array of per-processor data area structures, one per CPU.
+ * The array is indexed by CPU ID.
+ */
+struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
+
+/**
+ * This unmaps virtual addresses [0,512GB) by clearing the first entry in the
+ * PGD/PML4T. After this executes, accesses to virtual addresses [0,512GB) will
+ * cause a page fault.
+ */
+static void __init
+zap_identity_mappings(void)
+{
+ pgd_t *pgd = pgd_offset_k(0UL);
+ pgd_clear(pgd);
+ __flush_tlb();
+}
+
+/**
+ * Determines the address of the kernel boot command line.
+ */
+static char * __init
+find_command_line(void)
+{
+ unsigned long new_data;
+
+ new_data = *(u32 *) (x86_boot_params + NEW_CL_POINTER);
+ if (!new_data) {
+ if (OLD_CL_MAGIC != * (u16 *) OLD_CL_MAGIC_ADDR) {
+ return NULL;
+ }
+ new_data = OLD_CL_BASE_ADDR + * (u16 *) OLD_CL_OFFSET;
+ }
+ return __va(new_data);
+}
+
+/**
+ * This is the initial C entry point to the kernel.
+ * NOTE: The order of operations is usually important. Be careful.
+ */
+void __init
+x86_64_start_kernel(char * real_mode_data)
+{
+ int i;
+
+ /*
+ * Zero the "Block Started by Symbol" section...
+ * you know, the one that holds uninitialized data.
+ */
+ memset(__bss_start, 0,
+ (unsigned long) __bss_stop - (unsigned long) __bss_start);
+
+ /*
+ * Make NULL pointer dereferences segfault.
+ */
+ zap_identity_mappings();
+
+ /*
+ * Setup the initial interrupt descriptor table (IDT).
+	 * This will eventually be populated with the real handlers.
+ */
+ for (i = 0; i < 256; i++)
+ set_intr_gate(i, early_idt_handler);
+ asm volatile("lidt %0" :: "m" (idt_descr));
+
+ /*
+ * Early per-processor data area (PDA) initialization.
+ */
+ for (i = 0; i < NR_CPUS; i++)
+ cpu_pda(i) = &boot_cpu_pda[i];
+ pda_init(0, &bootstrap_task_union.task_info);
+
+ /*
+	 * Make a copy of the data passed by the bootloader.
+ * real_mode_data will get clobbered eventually when the memory
+ * subsystem is initialized.
+ */
+ memcpy(x86_boot_params, __va(real_mode_data), sizeof(x86_boot_params));
+ memcpy(lwk_command_line, find_command_line(), sizeof(lwk_command_line));
+
+ /*
+ * Tell the VGA driver the starting line number... this avoids
+ * overwriting BIOS and bootloader messages.
+ */
+ param_set_by_name_int("vga.row", SCREEN_INFO.orig_y);
+
+ /*
+ * Okay... we've done the bare essentials. Call into the
+ * platform-independent bootstrap function. This will in turn
+ * call back into architecture dependent code to do things like
+ * initialize interrupts and boot CPUs.
+ */
+ start_kernel();
+}
+
--- /dev/null
+/*
+ * linux/arch/x86_64/kernel/i387.c
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ * Copyright (C) 2002 Andi Kleen, SuSE Labs
+ *
+ * Pentium III FXSR, SSE support
+ * General FPU state handling cleanups
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ *
+ * x86-64 rework 2002 Andi Kleen.
+ * Does direct fxsave in and out of user space now for signal handlers.
+ * All the FSAVE<->FXSAVE conversion code has been moved to the 32bit emulation,
+ * the 64bit user space sees a FXSAVE frame directly.
+ */
+
+#include <lwk/task.h>
+#include <lwk/init.h>
+#include <arch/processor.h>
+#include <arch/i387.h>
+#include <arch/sigcontext.h>
+#include <arch/user.h>
+#include <arch/ptrace.h>
+#include <arch/uaccess.h>
+
+unsigned int mxcsr_feature_mask __read_mostly = 0xffffffff;
+
+void mxcsr_feature_mask_init(void)
+{
+ unsigned int mask;
+ clts();
+	memset(&current->arch.thread.i387.fxsave, 0, sizeof(struct i387_fxsave_struct));
+ asm volatile("fxsave %0" : : "m" (current->arch.thread.i387.fxsave));
+ mask = current->arch.thread.i387.fxsave.mxcsr_mask;
+ if (mask == 0) mask = 0x0000ffbf;
+ mxcsr_feature_mask &= mask;
+ stts();
+}
+
+/*
+ * Called at bootup to set up the initial FPU state that is later cloned
+ * into all processes.
+ */
+void __cpuinit fpu_init(void)
+{
+ unsigned long oldcr0 = read_cr0();
+ extern void __bad_fxsave_alignment(void);
+
+ if (offsetof(struct task_struct, arch.thread.i387.fxsave) & 15)
+ __bad_fxsave_alignment();
+ set_in_cr4(X86_CR4_OSFXSR); /* enable fast FPU state save/restore */
+ set_in_cr4(X86_CR4_OSXMMEXCPT); /* enable unmasked SSE exceptions */
+
+ write_cr0(oldcr0 & ~((1UL<<3)|(1UL<<2))); /* clear TS and EM */
+
+ mxcsr_feature_mask_init();
+ /* clean state in init */
+ current->arch.flags = 0;
+ clear_used_math();
+}
+
--- /dev/null
+#include <lwk/task.h>
+#include <lwk/init_task.h>
+#include <lwk/percpu.h>
+#include <lwk/aspace.h>
+#include <arch/processor.h>
+
+struct aspace bootstrap_aspace = {
+ BOOTSTRAP_ASPACE(bootstrap_aspace)
+ .arch = {
+ .pgd = (xpte_t *) init_level4_pgt
+ }
+};
+
+union task_union bootstrap_task_union
+ __attribute__((__section__(".data.bootstrap_task"))) =
+ {
+ /* Initialize task_union.task_info */
+ {
+ /* arch independent portion */
+ BOOTSTRAP_TASK(bootstrap_task_union.task_info)
+
+ /* x86_64 specific portion */
+ .arch = {
+ .addr_limit = PAGE_OFFSET
+ }
+ }
+ };
+
+/**
+ * Each CPU gets its own Task State Segment (TSS) structure. Tasks are
+ * completely 'soft' in the LWK, no more per-task TSS's and hardware task
+ * switching... we switch tasks completely in software. The TSS size is kept
+ * cacheline-aligned so they are allowed to end up in the
+ * .data.cacheline_aligned section. Since TSS's are completely CPU-local, we
+ * want them on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
+DEFINE_PER_CPU(struct tss_struct, tss)
+____cacheline_internodealigned_in_smp = BOOTSTRAP_TSS;
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/init.h>
+#include <lwk/kallsyms.h>
+#include <lwk/task.h>
+#include <lwk/sched.h>
+#include <lwk/timer.h>
+#include <arch/desc.h>
+#include <arch/idt_vectors.h>
+#include <arch/show.h>
+#include <arch/xcall.h>
+#include <arch/i387.h>
+
+typedef void (*idtvec_handler_t)(struct pt_regs *regs, unsigned int vector);
+
+idtvec_handler_t idtvec_table[NUM_IDT_ENTRIES];
+
+extern void asm_idtvec_table(void);
+
+void
+do_unhandled_idt_vector(struct pt_regs *regs, unsigned int vector)
+{
+ if ((vector >= IRQ0_VECTOR) && (vector <= IRQ15_VECTOR)) {
+ printk(KERN_EMERG
+ "Unhandled Interrupt! (vector=%u, isa_irq=%u)\n",
+ vector, vector - IRQ0_VECTOR);
+ } else {
+ printk(KERN_EMERG
+ "Unhandled Interrupt! (vector=%u)\n", vector);
+ }
+}
+
+void
+do_divide_error(struct pt_regs *regs, unsigned int vector)
+{
+ printk("Divide Error Exception\n");
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_nmi(struct pt_regs *regs, unsigned int vector)
+{
+ printk("NMI Exception\n");
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_int3(struct pt_regs *regs, unsigned int vector)
+{
+ printk("INT3 Exception\n");
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_overflow(struct pt_regs *regs, unsigned int vector)
+{
+ printk("Overflow Exception\n");
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_bounds(struct pt_regs *regs, unsigned int vector)
+{
+ printk("Bounds Exception\n");
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_invalid_op(struct pt_regs *regs, unsigned int vector)
+{
+ printk("Invalid Op Exception\n");
+ show_registers(regs);
+ while (1) {}
+}
+
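+/**
+ * Lazy FPU restore. The first time a task touches the FPU it takes a
+ * device-not-available (#NM) exception, which lands here. We mark the
+ * FPU as in use, clear CR0.TS via clts(), and restore the task's saved
+ * FPU state so execution can continue.
+ */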
+void
+do_device_not_available(struct pt_regs *regs, unsigned int vector)
+{
+ BUG_ON(current->arch.flags & TF_USED_FPU);
+ current->arch.flags |= TF_USED_FPU;
+ clts();
+ fpu_restore_state(current);
+}
+
+void
+do_double_fault(struct pt_regs *regs, unsigned int vector)
+{
+ printk("Double Fault Exception\n");
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_coproc_segment_overrun(struct pt_regs *regs, unsigned int vector)
+{
+ printk("Coprocessor Segment Exception\n");
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_invalid_tss(struct pt_regs *regs, unsigned int vector)
+{
+	printk("Invalid TSS Exception\n");
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_segment_not_present(struct pt_regs *regs, unsigned int vector)
+{
+ printk("Segment Not Present Exception\n");
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_stack_segment(struct pt_regs *regs, unsigned int vector)
+{
+ printk("Stack Segment Exception\n");
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_general_protection(struct pt_regs *regs, unsigned int vector)
+{
+ printk("General Protection Exception\n");
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_page_fault(struct pt_regs *regs, unsigned int vector)
+{
+ printk("Page Fault Exception\n");
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_spurious_interrupt_bug(struct pt_regs *regs, unsigned int vector)
+{
+ printk("Spurious Interrupt Exception\n");
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_coprocessor_error(struct pt_regs *regs, unsigned int vector)
+{
+ printk("Coprocessor Error Exception\n");
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_alignment_check(struct pt_regs *regs, unsigned int vector)
+{
+ printk("Alignment Check Exception\n");
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_machine_check(struct pt_regs *regs, unsigned int vector)
+{
+ printk("Machine Check Exception\n");
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_simd_coprocessor_error(struct pt_regs *regs, unsigned int vector)
+{
+ printk("SIMD Coprocessor Error Exception\n");
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_apic_timer(struct pt_regs *regs, unsigned int vector)
+{
+ expire_timers();
+}
+
+void
+do_apic_perf_counter(struct pt_regs *regs, unsigned int vector)
+{
+ printk("APIC Perf. Counter Interrupt, vector=%u\n", vector);
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_apic_thermal(struct pt_regs *regs, unsigned int vector)
+{
+ printk("APIC Thermal Interrupt, vector=%u\n", vector);
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_apic_error(struct pt_regs *regs, unsigned int vector)
+{
+ printk("APIC Error Interrupt, vector=%u\n", vector);
+ show_registers(regs);
+ while (1) {}
+}
+
+void
+do_apic_spurious(struct pt_regs *regs, unsigned int vector)
+{
+ printk("APIC Spurious Interrupt, vector=%u\n", vector);
+ show_registers(regs);
+ while (1) {}
+}
+
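+/**
+ * Installs a handler for the specified IDT vector and logs the
+ * registration (unless the handler is the default unhandled-vector
+ * handler).
+ */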
+void __init
+set_idtvec_handler(unsigned int vector, idtvec_handler_t handler)
+{
+ char namebuf[KSYM_NAME_LEN+1];
+ unsigned long symsize, offset;
+
+ ASSERT(vector < NUM_IDT_ENTRIES);
+
+ if (handler != &do_unhandled_idt_vector) {
+ printk(KERN_DEBUG "IDT Vector %3u -> %s()\n",
+ vector, kallsyms_lookup( (unsigned long)handler,
+ &symsize, &offset, namebuf )
+ );
+ }
+
+ idtvec_table[vector] = handler;
+}
+
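+/**
+ * Common interrupt dispatch routine. Calls the handler registered for
+ * the vector and acknowledges the local APIC for external interrupts.
+ */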
+void
+do_interrupt(struct pt_regs *regs, unsigned int vector)
+{
+ idtvec_table[vector](regs, vector);
+ if (vector >= FIRST_EXTERNAL_VECTOR)
+ lapic_ack_interrupt();
+}
+
+void __init
+interrupts_init(void)
+{
+ int vector;
+
+ /*
+ * Initialize the Interrupt Descriptor Table (IDT).
+ */
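+	/* Each stub in asm_idtvec_table is assumed to be 16 bytes long. */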
+ for (vector = 0; vector < NUM_IDT_ENTRIES; vector++) {
+ void *asm_handler = (void *) (
+ (unsigned long)(&asm_idtvec_table) + (vector * 16)
+ );
+ set_intr_gate(vector, asm_handler);
+ set_idtvec_handler(vector, &do_unhandled_idt_vector);
+ }
+
+ /*
+ * Register handlers for the standard x86_64 interrupts & exceptions.
+ */
+ set_idtvec_handler( DIVIDE_ERROR_VECTOR, &do_divide_error );
+ set_idtvec_handler( NMI_VECTOR, &do_nmi );
+ set_idtvec_handler( INT3_VECTOR, &do_int3 );
+ set_idtvec_handler( OVERFLOW_VECTOR, &do_overflow );
+ set_idtvec_handler( BOUNDS_VECTOR, &do_bounds );
+ set_idtvec_handler( INVALID_OP_VECTOR, &do_invalid_op );
+ set_idtvec_handler( DEVICE_NOT_AVAILABLE_VECTOR, &do_device_not_available );
+ set_idtvec_handler( DOUBLE_FAULT_VECTOR, &do_double_fault );
+ set_idtvec_handler( COPROC_SEGMENT_OVERRUN_VECTOR, &do_coproc_segment_overrun );
+ set_idtvec_handler( INVALID_TSS_VECTOR, &do_invalid_tss );
+ set_idtvec_handler( SEGMENT_NOT_PRESENT_VECTOR, &do_segment_not_present );
+ set_idtvec_handler( STACK_SEGMENT_VECTOR, &do_stack_segment );
+ set_idtvec_handler( GENERAL_PROTECTION_VECTOR, &do_general_protection );
+ set_idtvec_handler( PAGE_FAULT_VECTOR, &do_page_fault );
+ set_idtvec_handler( SPURIOUS_INTERRUPT_BUG_VECTOR, &do_spurious_interrupt_bug );
+ set_idtvec_handler( COPROCESSOR_ERROR_VECTOR, &do_coprocessor_error );
+ set_idtvec_handler( ALIGNMENT_CHECK_VECTOR, &do_alignment_check );
+ set_idtvec_handler( MACHINE_CHECK_VECTOR, &do_machine_check );
+ set_idtvec_handler( SIMD_COPROCESSOR_ERROR_VECTOR, &do_simd_coprocessor_error );
+
+ /*
+ * Register handlers for all of the local APIC vectors.
+ */
+ set_idtvec_handler( APIC_TIMER_VECTOR, &do_apic_timer );
+ set_idtvec_handler( APIC_PERF_COUNTER_VECTOR, &do_apic_perf_counter );
+ set_idtvec_handler( APIC_THERMAL_VECTOR, &do_apic_thermal );
+ set_idtvec_handler( APIC_ERROR_VECTOR, &do_apic_error );
+ set_idtvec_handler( APIC_SPURIOUS_VECTOR, &do_apic_spurious );
+
+ /*
+ * Register handlers for inter-CPU interrupts (cross calls).
+ */
+ set_idtvec_handler( XCALL_FUNCTION_VECTOR, &arch_xcall_function_interrupt );
+ set_idtvec_handler( XCALL_RESCHEDULE_VECTOR, &arch_xcall_reschedule_interrupt );
+}
+
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/init.h>
+#include <lwk/resource.h>
+#include <lwk/bootmem.h>
+#include <lwk/spinlock.h>
+#include <lwk/cpuinfo.h>
+#include <arch/io.h>
+#include <arch/pgtable.h>
+#include <arch/fixmap.h>
+#include <arch/apicdef.h>
+#include <arch/io_apic.h>
+#include <arch/idt_vectors.h>
+
+/**
+ * Lock that protects access to the IO APICs in the system.
+ * There is only one lock for all IO APICs.
+ */
+static DEFINE_SPINLOCK(ioapic_lock);
+
+/**
+ * Number of IO APICs in the system.
+ */
+unsigned int ioapic_num;
+
+/**
+ * Array containing the IDs of the IO APICs in the system.
+ * The array is indexed by ioapic_index.
+ */
+unsigned int ioapic_id[MAX_IO_APICS];
+
+/**
+ * Addresses of the IO APICs in the system.
+ * The array is indexed by ioapic_index.
+ */
+unsigned long ioapic_phys_addr[MAX_IO_APICS];
+
+/**
+ * Resource entries for the IO APIC memory mapping.
+ */
+static struct resource *ioapic_resources;
+
+/**
+ * Structure used to map IO APIC registers.
+ */
+struct ioapic {
+ uint32_t index;
+ uint32_t unused[3];
+ uint32_t data;
+};
+
+/**
+ * Union used to map an IO APIC routing entry register.
+ */
+union ioapic_entry_union {
+ struct { uint32_t low_word, high_word; };
+ struct IO_APIC_route_entry entry;
+};
+
+/**
+ * Returns the base kernel virtual address of the specified IO APIC's
+ * kernel mapping.
+ */
+static struct ioapic *
+ioapic_base_addr(int ioapic_index)
+{
+ return (void *) __fix_to_virt(FIX_IO_APIC_BASE_0 + ioapic_index)
+ + (ioapic_phys_addr[ioapic_index] & ~PAGE_MASK);
+}
+
+/**
+ * Reads a value from an IO APIC register.
+ */
+static uint32_t
+ioapic_read(unsigned int ioapic_index, uint32_t reg)
+{
+ struct ioapic *ioapic = ioapic_base_addr(ioapic_index);
+ writel(reg, &ioapic->index);
+ return readl(&ioapic->data);
+}
+
+/**
+ * Writes a value to an IO APIC register.
+ */
+static void
+ioapic_write(unsigned int ioapic_index, uint32_t reg, uint32_t value)
+{
+ struct ioapic *ioapic = ioapic_base_addr(ioapic_index);
+ writel(reg, &ioapic->index);
+ writel(value, &ioapic->data);
+}
+
+/**
+ * Reads an IO APIC pin routing entry.
+ */
+static struct IO_APIC_route_entry
+ioapic_read_pin(
+ unsigned int ioapic_index,
+ unsigned int pin
+)
+{
+ union ioapic_entry_union eu;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ eu.low_word = ioapic_read(ioapic_index, 0x10 + 2 * pin);
+ eu.high_word = ioapic_read(ioapic_index, 0x11 + 2 * pin);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ return eu.entry;
+}
+
+/**
+ * Writes an IO APIC pin routing entry.
+ *
+ * When we write a new IO APIC routing entry, we need to write the high word
+ * first. This is because the mask/enable bit is in the low word and we do not
+ * want to enable the entry before it is fully populated.
+ */
+static void
+ioapic_write_pin(
+ unsigned int ioapic_index,
+ unsigned int pin,
+ struct IO_APIC_route_entry pin_config
+)
+{
+ union ioapic_entry_union eu;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ eu.entry = pin_config;
+ ioapic_write(ioapic_index, 0x11 + 2 * pin, eu.high_word);
+ ioapic_write(ioapic_index, 0x10 + 2 * pin, eu.low_word);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
+/**
+ * Masks (disables) an IO APIC input pin.
+ */
+static void
+ioapic_mask_pin(
+ unsigned int ioapic_index,
+ unsigned int pin
+)
+{
+ struct IO_APIC_route_entry pin_config =
+ ioapic_read_pin(ioapic_index, pin);
+ pin_config.mask = 1;
+ ioapic_write_pin(ioapic_index, pin, pin_config);
+}
+
+/**
+ * Unmasks (enables) an IO APIC input pin.
+ */
+static void
+ioapic_unmask_pin(
+ unsigned int ioapic_index,
+ unsigned int pin
+)
+{
+ struct IO_APIC_route_entry pin_config =
+ ioapic_read_pin(ioapic_index, pin);
+ pin_config.mask = 0;
+ ioapic_write_pin(ioapic_index, pin, pin_config);
+}
+
+/**
+ * Returns the number of input pins provided by the specified IO APIC.
+ */
+static unsigned int
+ioapic_get_num_pins(unsigned int ioapic_index)
+{
+ union IO_APIC_reg_01 reg_01;
+
+ reg_01.raw = ioapic_read(ioapic_index, 1);
+ return reg_01.bits.entries + 1;
+}
+
+/**
+ * Initializes the primary IO APIC (the one connected to the ISA IRQs).
+ */
+static void __init
+ioapic_init_primary(
+ unsigned int ioapic_index
+)
+{
+ unsigned int pin;
+ unsigned int num_pins = ioapic_get_num_pins(ioapic_index);
+ struct IO_APIC_route_entry cfg;
+
+ if (num_pins != 24)
+ panic("Expected IOAPIC to have 24 pins, has %u.", num_pins);
+
+ /* Mask (disable) all pins */
+ for (pin = 0; pin < num_pins; pin++) {
+ ioapic_mask_pin(ioapic_index, pin);
+ }
+
+ /*
+ * Configure ISA IRQs.
+ * (Assuming pins [1,15] are the standard ISA IRQs)
+ * (Assuming pin 2 is hooked to the timer interrupt)
+ * (Assuming pin 0 is hooked to the old i8259 PIC... don't use it)
+ */
+ for (pin = 1; pin <= 15; pin++) {
+ cfg = ioapic_read_pin(ioapic_index, pin);
+
+ cfg.delivery_mode = ioapic_fixed;
+ cfg.dest_mode = ioapic_physical_dest;
+ cfg.polarity = (pin == 8)
+ ? ioapic_active_low
+ : ioapic_active_high;
+ cfg.trigger = ioapic_edge_sensitive;
+ cfg.dest = (uint8_t) cpu_info[0].physical_id;
+ cfg.vector = IRQ0_VECTOR + pin;
+
+ ioapic_write_pin(ioapic_index, pin, cfg);
+ }
+
+ /*
+ * Configure PCI IRQs.
+ * (Assuming pins [16,19] are PCI INTA, INTB, INTC, and INTD)
+ */
+ for (pin = 16; pin <= 19; pin++) {
+ cfg = ioapic_read_pin(ioapic_index, pin);
+
+ cfg.delivery_mode = ioapic_fixed;
+ cfg.dest_mode = ioapic_physical_dest;
+ cfg.polarity = ioapic_active_low;
+ cfg.trigger = ioapic_level_sensitive;
+ cfg.dest = (uint8_t) cpu_info[0].physical_id;
+ cfg.vector = IRQ0_VECTOR + pin;
+
+ ioapic_write_pin(ioapic_index, pin, cfg);
+ }
+
+ /* Unmask (enable) all of the pins that have been configured */
+	for (pin = 1; pin <= 19; pin++) {
+ ioapic_unmask_pin(ioapic_index, pin);
+ }
+}
+
+/**
+ * Creates a kernel mapping for all IO APICs in the system.
+ */
+void __init
+ioapic_map(void)
+{
+ unsigned int i;
+ const int name_size = 16;
+ char *name;
+
+ if (ioapic_num == 0)
+ return;
+
+ /*
+ * Allocate enough memory for one resource structure per detected IO
+ * APIC in the system. Memory for the resource name strings is tacked
+ * onto the end of the allocation (name_size*ioapic_num bytes).
+ */
+ ioapic_resources = alloc_bootmem(ioapic_num *
+ (sizeof(struct resource) + name_size));
+ name = ((char *)ioapic_resources) + ioapic_num*sizeof(struct resource);
+
+ for (i = 0; i < ioapic_num; i++) {
+ /* Reserve the physical memory used by the IO APIC */
+ sprintf(name, "IO APIC %u", i);
+ ioapic_resources[i].name = name;
+ ioapic_resources[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ ioapic_resources[i].start = ioapic_phys_addr[i];
+ ioapic_resources[i].end = ioapic_phys_addr[i] + 4096 - 1;
+ request_resource(&iomem_resource, &ioapic_resources[i]);
+ name += name_size;
+
+ /* Map the IO APIC into the kernel */
+ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + i, ioapic_phys_addr[i]);
+
+ printk(KERN_DEBUG
+ "IO APIC mapped to virtual address 0x%016lx\n",
+ __fix_to_virt(FIX_IO_APIC_BASE_0 + i)
+ );
+ }
+}
+
+/**
+ * Initializes all IO APICs in the system.
+ */
+void __init
+ioapic_init(void)
+{
+ if (ioapic_num == 0)
+ return;
+
+/* TODO: FIX THIS... NEED TO PARSE MPTABLE OR SOMETHING ELSE */
+#ifdef CONFIG_PC
+ /* TODO: For now, only initializes the first one. */
+ ioapic_init_primary(0);
+ ioapic_dump();
+#endif
+}
+
+/**
+ * Dumps the current state of all IO APICs in the system.
+ */
+void __init
+ioapic_dump(void)
+{
+ unsigned int ioapic_index, pin;
+ union IO_APIC_reg_00 reg_00;
+ union IO_APIC_reg_01 reg_01;
+ union IO_APIC_reg_02 reg_02;
+ struct IO_APIC_route_entry entry;
+ unsigned long flags;
+
+ for (ioapic_index = 0; ioapic_index < ioapic_num; ioapic_index++) {
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ reg_00.raw = ioapic_read(ioapic_index, 0);
+ reg_01.raw = ioapic_read(ioapic_index, 1);
+ if (reg_01.bits.version >= 0x10)
+ reg_02.raw = ioapic_read(ioapic_index, 2);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+ printk(KERN_DEBUG "Dump of IO APIC %u (physical id %u):\n",
+ ioapic_index, ioapic_id[ioapic_index]);
+ printk(KERN_DEBUG " register #00: %08X\n", reg_00.raw);
+ printk(KERN_DEBUG " physical APIC id: %02u\n", reg_00.bits.ID);
+		printk(KERN_DEBUG " register #01: %08X\n", *(int *)&reg_01);
+ printk(KERN_DEBUG " max redirection entries: %04X\n", reg_01.bits.entries);
+ printk(KERN_DEBUG " PRQ implemented: %X\n", reg_01.bits.PRQ);
+ printk(KERN_DEBUG " IO APIC version: %04X\n", reg_01.bits.version);
+ if (reg_01.bits.version >= 0x10) {
+ printk(KERN_DEBUG " register #02: %08X\n", reg_02.raw);
+ printk(KERN_DEBUG " arbitration: %02X\n", reg_02.bits.arbitration);
+ }
+
+ printk(KERN_DEBUG " Interrupt Redirection Table:\n");
+ for (pin = 0; pin <= reg_01.bits.entries; pin++) {
+ entry = ioapic_read_pin(ioapic_index, pin);
+ printk(KERN_DEBUG
+ " %02u: vector=%u dest=%03u mask=%1d "
+ "trigger=%1d irr=%1d polarity=%1d\n",
+ pin, entry.vector, entry.dest, entry.mask,
+ entry.trigger, entry.irr, entry.polarity);
+ printk(KERN_DEBUG
+ " dest_mode=%1d delivery_mode=%1d "
+ "delivery_status=%1d\n",
+ entry.dest_mode, entry.delivery_mode,
+ entry.delivery_status);
+ }
+ }
+}
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/init.h>
+#include <lwk/resource.h>
+#include <lwk/cpuinfo.h>
+#include <lwk/smp.h>
+#include <lwk/delay.h>
+#include <arch/page.h>
+#include <arch/pgtable.h>
+#include <arch/fixmap.h>
+#include <arch/apicdef.h>
+#include <arch/apic.h>
+#include <arch/idt_vectors.h>
+#include <arch/tsc.h>
+
+/**
+ * Physical address of the local APIC memory mapping.
+ * If the system BIOS provided an MP configuration table, this is set in
+ * arch/x86_64/kernel/mpparse.c to the value parsed from the table.
+ * Otherwise, the default address is used.
+ */
+unsigned long lapic_phys_addr = APIC_DEFAULT_PHYS_BASE;
+
+/**
+ * Resource entry for the local APIC memory mapping.
+ */
+static struct resource lapic_resource = {
+ .name = "Local APIC",
+ .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
+ /* .start and .end filled in later based on detected address */
+};
+
+/**
+ * Creates a kernel mapping for the local APIC.
+ *
+ * The hardware/platform/BIOS maps each CPU's local APIC at the same location
+ * in physical memory. This function uses the 'fixmap' to map the local APIC
+ * into the kernel's virtual memory space at a fixed virtual address that is
+ * known at compile time. Since the local APIC's virtual address is known
+ * at compile time, local APIC registers can be accessed directly, without
+ * any pointer dereferencing.
+ */
+void __init
+lapic_map(void)
+{
+ if (!cpu_has_apic)
+ panic("No local APIC.");
+
+ /* Reserve physical memory used by the local APIC */
+ lapic_resource.start = lapic_phys_addr;
+ lapic_resource.end = lapic_phys_addr + 4096 - 1;
+ request_resource(&iomem_resource, &lapic_resource);
+
+ /* Map local APIC into the kernel */
+ set_fixmap_nocache(FIX_APIC_BASE, lapic_phys_addr);
+
+ printk(KERN_DEBUG "Local APIC mapped to virtual address 0x%016lx\n",
+ fix_to_virt(FIX_APIC_BASE));
+}
+
+/**
+ * Initializes the calling CPU's local APIC.
+ */
+void __init
+lapic_init(void)
+{
+ uint32_t val;
+
+ /*
+ * Initialize Destination Format Register.
+ * When using logical destination mode, we want to use the flat model.
+ */
+ apic_write(APIC_DFR, APIC_DFR_FLAT);
+
+ /*
+ * Initialize the Logical Destination Register.
+ * The LWK never uses logical destination mode, so just set it to the
+ * APIC's physical ID to avoid possible confusion.
+ */
+ val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+ val |= SET_APIC_LOGICAL_ID( GET_APIC_ID(apic_read(APIC_ID)) );
+ apic_write(APIC_LDR, val);
+
+ /*
+ * Initialize the Task Priority Register.
+ * We set this to accept all (0) and never touch it again.
+ */
+ val = apic_read(APIC_TASKPRI) & ~APIC_TPRI_MASK;
+ apic_write(APIC_TASKPRI, val);
+
+ /*
+	 * Initialize the Spurious-Interrupt Vector Register.
+ * This also enables the local APIC.
+ */
+ val = apic_read(APIC_SPIV) & ~APIC_VECTOR_MASK;
+ val |= (APIC_SPIV_APIC_ENABLED | APIC_SPURIOUS_VECTOR);
+ apic_write(APIC_SPIV, val);
+
+ /* Setup LVT[0] = APIC Timer Interrupt */
+ apic_write(APIC_LVTT, 0
+ | APIC_DM_FIXED /* route to fixed IDT vector */
+ | APIC_TIMER_VECTOR /* IDT vector to route to */
+ | APIC_LVT_MASKED /* initially disable */
+ );
+
+ /* Setup LVT[1] = Thermal Sensor Interrupt */
+ apic_write(APIC_LVTTHMR, 0
+ | APIC_DM_FIXED /* route to fixed IDT vector */
+ | APIC_THERMAL_VECTOR /* IDT vector to route to */
+ );
+
+ /* Setup LVT[2] = Performance Counter Interrupt */
+ apic_write(APIC_LVTPC, 0
+ | APIC_DM_NMI /* treat as non-maskable interrupt */
+ /* NMIs are routed to IDT vector 2 */
+ | APIC_LVT_MASKED /* initially disable */
+ );
+
+ /* Setup LVT[3] = Local Interrupt Pin 0 */
+ apic_write(APIC_LVT0, 0
+ | APIC_DM_EXTINT /* hooked up to old 8259A PIC */
+ /* IDT vector provided by 8259A */
+ | APIC_LVT_MASKED /* disable */
+ );
+
+ /* Setup LVT[4] = Local Interrupt Pin 1 */
+ apic_write(APIC_LVT1, 0
+ | APIC_DM_NMI /* treat as non-maskable interrupt */
+ /* NMIs are routed to IDT vector 2 */
+ | ((this_cpu != 0)
+ ? APIC_LVT_MASKED /* mask on all but bootstrap CPU */
+ : 0) /* bootstrap CPU (0) receives NMIs */
+ );
+
+ /* Setup LVT[5] = Internal APIC Error Detector Interrupt */
+ apic_write(APIC_LVTERR, 0
+ | APIC_DM_FIXED /* route to fixed IDT vector */
+ | APIC_ERROR_VECTOR /* IDT vector to route to */
+ );
+ apic_write(APIC_ESR, 0); /* spec says to clear after enabling LVTERR */
+}
+
+void
+lapic_set_timer(uint32_t count)
+{
+ uint32_t lvt;
+
+	/* Setup the Divide Configuration Register to use the bus frequency directly. */
+ apic_write(APIC_TDCR, APIC_TDR_DIV_1);
+
+ /* Program the initial count register */
+ apic_write(APIC_TMICT, count);
+
+ /* Enable the local APIC timer */
+ lvt = apic_read(APIC_LVTT);
+ lvt &= ~APIC_LVT_MASKED;
+ lvt |= APIC_LVT_TIMER_PERIODIC;
+ apic_write(APIC_LVTT, lvt);
+}
+
+void
+lapic_stop_timer(void)
+{
+ uint32_t lvt;
+
+ /* Set the initial count to 0 */
+ apic_write(APIC_TMICT, 0);
+
+	/* Mask (disable) the local APIC timer */
+ lvt = apic_read(APIC_LVTT);
+ lvt |= APIC_LVT_MASKED;
+ apic_write(APIC_LVTT, lvt);
+}
+
+/**
+ * Detects the local APIC reference bus clock. The only sure-fire way to do
+ * this is to depend on some other absolute timing source. This function uses
+ * the CPU's cycle counter and the previously detected CPU clock frequency.
+ *
+ * NOTE: This assumes that the CPU's clock frequency has already been detected.
+ * (i.e., cpu_info[cpu_id()].arch.tsc_khz has been initialized.)
+ */
+unsigned int __init
+lapic_calibrate_timer(void)
+{
+ const unsigned int tick_count = 100000000;
+ cycles_t tsc_start, tsc_now;
+ uint32_t apic_start, apic_now;
+ unsigned int apic_Hz;
+
+ /* Start the APIC counter running for calibration */
+ lapic_set_timer(4000000000);
+
+ apic_start = apic_read(APIC_TMCCT);
+ tsc_start = get_cycles_sync();
+
+ /* Spin until enough ticks for a meaningful result have elapsed */
+ do {
+ apic_now = apic_read(APIC_TMCCT);
+ tsc_now = get_cycles_sync();
+ } while ( ((tsc_now - tsc_start) < tick_count) &&
+ ((apic_start - apic_now) < tick_count) );
+
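+	/*
+	 * APIC ticks elapsed divided by the elapsed time in seconds, where
+	 * elapsed seconds = tsc_cycles / (tsc_khz * 1000). Rearranged to
+	 * use integer math.
+	 */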
+ apic_Hz = (apic_start - apic_now) * 1000L *
+ cpu_info[this_cpu].arch.tsc_khz / (tsc_now - tsc_start);
+
+ lapic_stop_timer();
+
+ return (apic_Hz / 1000);
+}
+
+static uint32_t
+lapic_wait4_icr_idle(void)
+{
+ uint32_t send_status;
+ int timeout;
+
+ /* Wait up to 100 milliseconds */
+ timeout = 0;
+ do {
+ send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ if (!send_status)
+ break;
+ udelay(100);
+ } while (timeout++ < 1000);
+
+ return send_status;
+}
+
+/**
+ * Returns the number of entries in the Local Vector Table minus one.
+ *
+ * This should return 5 or higher on all x86_64 CPUs.
+ * 6 is returned if the APIC Thermal Interrupt is supported, 5 otherwise.
+ */
+static uint32_t
+lapic_get_maxlvt(void)
+{
+ return GET_APIC_MAXLVT(apic_read(APIC_LVR));
+}
+
+/**
+ * Sends an INIT inter-processor interrupt.
+ * This is used during bootstrap to wakeup the AP CPUs.
+ */
+void __init
+lapic_send_init_ipi(unsigned int cpu)
+{
+ uint32_t status;
+ unsigned int apic_id = cpu_info[cpu].arch.apic_id;
+
+ /* Turn on INIT at target CPU */
+ apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apic_id));
+ apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
+ | APIC_DM_INIT);
+ status = lapic_wait4_icr_idle();
+ if (status)
+ panic("INIT IPI ERROR: failed to assert INIT. (%x)", status);
+ mdelay(10);
+
+ /* Turn off INIT at target CPU */
+ apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apic_id));
+ apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
+ status = lapic_wait4_icr_idle();
+ if (status)
+ panic("INIT IPI ERROR: failed to deassert INIT. (%x)", status);
+}
+
+/**
+ * Send a STARTUP inter-processor interrupt.
+ * This is used during bootstrap to wakeup the AP CPUs.
+ */
+void __init
+lapic_send_startup_ipi(
+ unsigned int cpu, /* Logical CPU ID */
+ unsigned long start_rip /* Physical addr */
+)
+{
+ uint32_t status;
+ unsigned int maxlvt = lapic_get_maxlvt();
+ unsigned int apic_id = cpu_info[cpu].arch.apic_id;
+
+ /* Clear errors */
+ apic_write(APIC_ESR, 0);
+ apic_read(APIC_ESR);
+
+ /* Set target CPU */
+ apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apic_id));
+
+ /* Send Startup IPI to target CPU */
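+	/* The SIPI vector field is the 4 KB page number of the trampoline, hence start_rip >> 12. */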
+ apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apic_id));
+ apic_write(APIC_ICR, APIC_DM_STARTUP | (start_rip >> 12));
+ udelay(300); /* Give AP CPU some time to accept the IPI */
+ status = lapic_wait4_icr_idle();
+ if (status)
+ panic("STARTUP IPI ERROR: failed to send. (%x)", status);
+ udelay(300); /* Give AP CPU some time to accept the IPI */
+
+ /* Fixup for Pentium erratum 3AP, clear errors */
+ if (maxlvt > 3)
+ apic_write(APIC_ESR, 0);
+
+ /* Verify that IPI was accepted */
+ status = (apic_read(APIC_ESR) & 0xEF);
+ if (status)
+ panic("STARTUP IPI ERROR: failed to accept. (%x)", status);
+}
+
+/**
+ * Sends an inter-processor interrupt (IPI) to the specified CPU.
+ * Note that the IPI has not necessarily been delivered when this function
+ * returns.
+ */
+void
+lapic_send_ipi(
+ unsigned int cpu, /* Logical CPU ID */
+ unsigned int vector /* Interrupt vector to send */
+)
+{
+ uint32_t status;
+ unsigned int apic_id;
+
+ /* Wait for idle */
+ status = lapic_wait4_icr_idle();
+ if (status)
+ panic("lapic_wait4_icr_idle() timed out. (%x)", status);
+
+ /* Set target CPU */
+ apic_id = cpu_info[cpu].arch.apic_id;
+ apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apic_id));
+
+ /* Send the IPI */
+ if (unlikely(vector == NMI_VECTOR))
+ apic_write(APIC_ICR, APIC_DEST_PHYSICAL|APIC_DM_NMI);
+ else
+ apic_write(APIC_ICR, APIC_DEST_PHYSICAL|APIC_DM_FIXED|vector);
+}
+
+/**
+ * Converts an entry in a local APIC's Local Vector Table to a
+ * human-readable string.
+ */
+static char *
+lvt_stringify(uint32_t entry, char *buf)
+{
+ uint32_t delivery_mode = GET_APIC_DELIVERY_MODE(entry);
+
+ if (delivery_mode == APIC_MODE_FIXED) {
+ sprintf(buf, "FIXED -> IDT_VECTOR %d",
+ entry & APIC_VECTOR_MASK
+ );
+ } else if (delivery_mode == APIC_MODE_NMI) {
+ sprintf(buf, "NMI -> IDT VECTOR 2");
+ } else if (delivery_mode == APIC_MODE_EXTINT) {
+ sprintf(buf, "ExtINT, hooked to old 8259A PIC");
+ } else {
+ sprintf(buf, "UNKNOWN");
+ }
+
+ if (entry & APIC_LVT_MASKED)
+ strcat(buf, ", MASKED");
+
+ return buf;
+}
+
+/**
+ * Prints various local APIC registers of interest to the console.
+ */
+void
+lapic_dump(void)
+{
+ char buf[128];
+
+ printk(KERN_DEBUG "LOCAL APIC DUMP (LOGICAL CPU #%d):\n", this_cpu);
+
+ /*
+ * Lead off with the important stuff...
+ */
+ printk(KERN_DEBUG
+ " ID: 0x%08x (id=%d)\n",
+ apic_read(APIC_ID),
+ GET_APIC_ID(apic_read(APIC_ID))
+ );
+ printk(KERN_DEBUG
+ " VER: 0x%08x (version=0x%x, max_lvt=%d)\n",
+ apic_read(APIC_LVR),
+ GET_APIC_VERSION(apic_read(APIC_LVR)),
+ GET_APIC_MAXLVT(apic_read(APIC_LVR))
+ );
+ printk(KERN_DEBUG
+ " ESR: 0x%08x (Error Status Reg, non-zero is bad)\n",
+ apic_read(APIC_ESR)
+ );
+ printk(KERN_DEBUG
+ " SVR: 0x%08x (Spurious vector=%d, %s)\n",
+ apic_read(APIC_SPIV),
+ apic_read(APIC_SPIV) & APIC_VECTOR_MASK,
+ (apic_read(APIC_SPIV) & APIC_SPIV_APIC_ENABLED)
+ ? "APIC IS ENABLED"
+ : "APIC IS DISABLED"
+ );
+
+ /*
+ * Local Vector Table
+ */
+ printk(KERN_DEBUG " Local Vector Table Entries:\n");
+ printk(KERN_DEBUG " LVT[0] Timer: 0x%08x (%s)\n",
+ apic_read(APIC_LVTT),
+ lvt_stringify(apic_read(APIC_LVTT), buf)
+ );
+ printk(KERN_DEBUG " LVT[1] Thermal: 0x%08x (%s)\n",
+ apic_read(APIC_LVTTHMR),
+ lvt_stringify(apic_read(APIC_LVTTHMR), buf)
+ );
+ printk(KERN_DEBUG " LVT[2] Perf Cnt: 0x%08x (%s)\n",
+ apic_read(APIC_LVTPC),
+ lvt_stringify(apic_read(APIC_LVTPC), buf)
+ );
+ printk(KERN_DEBUG " LVT[3] LINT0 Pin: 0x%08x (%s)\n",
+ apic_read(APIC_LVT0),
+ lvt_stringify(apic_read(APIC_LVT0), buf)
+ );
+ printk(KERN_DEBUG " LVT[4] LINT1 Pin: 0x%08x (%s)\n",
+ apic_read(APIC_LVT1),
+ lvt_stringify(apic_read(APIC_LVT1), buf)
+ );
+ printk(KERN_DEBUG " LVT[5] Error: 0x%08x (%s)\n",
+ apic_read(APIC_LVTERR),
+ lvt_stringify(apic_read(APIC_LVTERR), buf)
+ );
+
+ /*
+ * APIC timer configuration registers
+ */
+ printk(KERN_DEBUG " Local APIC Timer:\n");
+ printk(KERN_DEBUG " DCR (Divide Config Reg): 0x%08x\n",
+ apic_read(APIC_TDCR)
+ );
+ printk(KERN_DEBUG " ICT (Initial Count Reg): 0x%08x\n",
+ apic_read(APIC_TMICT)
+ );
+ printk(KERN_DEBUG " CCT (Current Count Reg): 0x%08x\n",
+ apic_read(APIC_TMCCT)
+ );
+
+ /*
+ * Logical APIC addressing mode registers
+ */
+ printk(KERN_DEBUG " Logical Addressing Mode Information:\n");
+ printk(KERN_DEBUG " LDR (Logical Dest Reg): 0x%08x (id=%d)\n",
+ apic_read(APIC_LDR),
+ GET_APIC_LOGICAL_ID(apic_read(APIC_LDR))
+ );
+ printk(KERN_DEBUG " DFR (Dest Format Reg): 0x%08x (%s)\n",
+ apic_read(APIC_DFR),
+ (apic_read(APIC_DFR) == APIC_DFR_FLAT) ? "FLAT" : "CLUSTER"
+ );
+
+ /*
+ * Task/processor/arbitration priority registers
+ */
+ printk(KERN_DEBUG " Task/Processor/Arbitration Priorities:\n");
+ printk(KERN_DEBUG " TPR (Task Priority Reg): 0x%08x\n",
+ apic_read(APIC_TASKPRI)
+ );
+ printk(KERN_DEBUG " PPR (Processor Priority Reg): 0x%08x\n",
+ apic_read(APIC_PROCPRI)
+ );
+ printk(KERN_DEBUG " APR (Arbitration Priority Reg): 0x%08x\n",
+ apic_read(APIC_ARBPRI)
+ );
+}
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/init.h>
+#include <lwk/cpuinfo.h>
+#include <lwk/smp.h>
+#include <lwk/delay.h>
+#include <lwk/bootmem.h>
+#include <lwk/task.h>
+#include <lwk/sched.h>
+#include <arch/atomic.h>
+#include <arch/apicdef.h>
+#include <arch/apic.h>
+#include <arch/desc.h>
+
+/**
+ * MP boot trampoline 80x86 program as an array.
+ */
+extern unsigned char trampoline_data[];
+extern unsigned char trampoline_end[];
+
+/**
+ * These specify the initial stack pointer and instruction pointer for a
+ * newly booted CPU.
+ */
+extern volatile unsigned long init_rsp;
+extern void (*initial_code)(void);
+
+void __init
+start_secondary(void)
+{
+ cpu_init();
+ cpu_set(this_cpu, cpu_online_map);
+ schedule(); /* runs idle_task, since that's the only task
+ * on the CPU's run queue at this point */
+}
+
+void __init
+arch_boot_cpu(unsigned int cpu)
+{
+ union task_union *new_task_union;
+ struct task_struct *new_task;
+
+ /*
+ * Setup the 'trampoline' cpu boot code. The trampoline contains the
+ * first code executed by the CPU being booted. x86 CPUs boot in
+ * pre-historic 16-bit 'real mode'... the trampoline does the messy
+ * work to get us to 64-bit long mode and then calls the *initial_code
+ * kernel entry function.
+ */
+ memcpy(__va(SMP_TRAMPOLINE_BASE), trampoline_data,
+ trampoline_end - trampoline_data
+ );
+
+ /*
+ * Allocate memory for the new CPU's GDT.
+ */
+ cpu_gdt_descr[cpu].address = (unsigned long) kmem_get_pages(0);
+
+ /*
+ * Allocate memory for the new CPU's bootstrap task.
+ */
+ new_task_union = kmem_get_pages(TASK_ORDER);
+ new_task = &new_task_union->task_info;
+
+ /*
+ * Initialize the bare minimum info needed to bootstrap the new CPU.
+ */
+ new_task->id = 0;
+ new_task->aspace = &bootstrap_aspace;
+ new_task->cpu_id = cpu;
+ strcpy(new_task->name, "bootstrap");
+ list_head_init(&new_task->sched_link);
+
+ /*
+ * Set the initial kernel entry point and stack pointer for the new CPU.
+ */
+ initial_code = start_secondary;
+ init_rsp = (unsigned long)new_task_union
+ + sizeof(union task_union) - 1;
+
+ /*
+ * Boot it!
+ */
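+	/* Standard INIT-SIPI-SIPI wakeup sequence: one INIT IPI followed by two STARTUP IPIs. */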
+ lapic_send_init_ipi(cpu);
+ lapic_send_startup_ipi(cpu, SMP_TRAMPOLINE_BASE);
+ lapic_send_startup_ipi(cpu, SMP_TRAMPOLINE_BASE);
+}
+
--- /dev/null
+/*
+ * Intel Multiprocessor Specification 1.1 and 1.4
+ * compliant MP-table parsing routines.
+ *
+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ *
+ * Fixes
+ * Erich Boleyn : MP v1.4 and additional changes.
+ * Alan Cox : Added EBDA scanning
+ * Ingo Molnar : various cleanups and rewrites
+ * Maciej W. Rozycki: Bits for default MP configurations
+ * Paul Diefenbaugh: Added full ACPI support
+ */
+
+#include <lwk/smp.h>
+#include <lwk/init.h>
+#include <lwk/bootmem.h>
+#include <lwk/cpuinfo.h>
+#include <lwk/params.h>
+#include <arch/io.h>
+#include <arch/mpspec.h>
+#include <arch/proto.h>
+#include <arch/io_apic.h>
+
+/**
+ * Points to the MP table, once and if it is found.
+ * This gets initialized by find_mp_table().
+ */
+static struct intel_mp_floating *mpf_found;
+
+/**
+ * Physical CPU ID of the bootstrap CPU (the BP).
+ */
+unsigned int __initdata boot_phys_cpu_id = -1U;
+
+/**
+ * The number of CPUs in the system.
+ */
+unsigned int __initdata num_cpus = 0;
+
+/**
+ * Map of all CPUs present.
+ * Bits set represent physical CPU IDs present.
+ */
+physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
+
+/**
+ * Version information for every Local APIC in the system.
+ * The array is indexed by APIC ID.
+ */
+unsigned char apic_version[MAX_APICS];
+
+/**
+ * MP Bus information
+ */
+static int mp_current_pci_id = 0;
+unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
+int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
+
+/**
+ * MP IO APIC information
+ */
+int nr_ioapics = 0;
+struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
+
+/**
+ * MP IRQ information
+ */
+int mp_irq_entries = 0;
+struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+
+
+/* TODO: move these */
+int pic_mode;
+
+/**
+ * Computes the checksum of an MP configuration block.
+ */
+static int __init
+mpf_checksum(unsigned char *mp, int len)
+{
+ int sum = 0;
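+	/* A valid MP structure's bytes sum to zero modulo 256, so a nonzero result means a bad checksum. */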
+ while (len--)
+ sum += *mp++;
+ return sum & 0xFF;
+}
+
+/**
+ * Parses an MP table CPU entry.
+ */
+static void __init
+MP_processor_info(struct mpc_config_processor *m)
+{
+ int cpu;
+ int is_bp;
+ unsigned char ver;
+ cpumask_t tmp_map;
+
+ if (!(m->mpc_cpuflag & CPU_ENABLED))
+ panic("A disabled CPU was encountered\n");
+
+ /* Count the new CPU */
+ if (++num_cpus > NR_CPUS)
+ panic("NR_CPUS limit of %i reached.\n", NR_CPUS);
+
+ /*
+ * Determine if this is the bootstrap processor...
+ * the one responsible for booting the other CPUs.
+ */
+ is_bp = (m->mpc_cpuflag & CPU_BOOTPROCESSOR);
+
+ /*
+ * Assign a logical CPU ID.
+ * The bootstrap CPU is always assigned logical ID 0.
+ * All other CPUs are assigned the lowest ID available.
+ */
+ if (is_bp) {
+ cpu = 0;
+ } else {
+ cpus_complement(tmp_map, cpu_present_map);
+ cpu = first_cpu(tmp_map);
+ }
+
+ /* Validate APIC version, fixing up if necessary. */
+ ver = m->mpc_apicver;
+ if (ver == 0x0) {
+ printk(KERN_ERR "BIOS bug, APIC version is 0 for PhysCPU#%d! "
+ "fixing up to 0x10. (tell your hw vendor)\n",
+ m->mpc_apicid);
+ ver = 0x10;
+ }
+
+ /* Remember the APIC's version */
+ apic_version[m->mpc_apicid] = ver;
+
+ /* Add the CPU to the map of physical CPU IDs present. */
+ physid_set(m->mpc_apicid, phys_cpu_present_map);
+
+ /* Remember the physical CPU ID of the bootstrap CPU. */
+ if (is_bp)
+ boot_phys_cpu_id = m->mpc_apicid;
+
+ /* Add the CPU to the map of logical CPU IDs present. */
+ cpu_set(cpu, cpu_present_map);
+
+ /* Store ID information. */
+ cpu_info[cpu].logical_id = cpu;
+ cpu_info[cpu].physical_id = m->mpc_apicid;
+ cpu_info[cpu].arch.apic_id = m->mpc_apicid;
+
+ printk(KERN_DEBUG
+ "Physical CPU #%d -> Logical CPU #%d, %d:%d APIC version %d%s\n",
+ m->mpc_apicid,
+ cpu,
+ (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
+ (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
+ ver,
+ is_bp ? " (Bootstrap CPU)" : "");
+}
+
+/**
+ * Parses an MP table BUS entry.
+ */
+static void __init
+MP_bus_info(struct mpc_config_bus *m)
+{
+ char str[7];
+
+ memcpy(str, m->mpc_bustype, 6);
+ str[6] = 0;
+ printk(KERN_DEBUG "Bus #%d is %s\n", m->mpc_busid, str);
+
+ if (strncmp(str, "ISA", 3) == 0) {
+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
+ } else if (strncmp(str, "EISA", 4) == 0) {
+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
+ } else if (strncmp(str, "PCI", 3) == 0) {
+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
+ mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
+ mp_current_pci_id++;
+ } else if (strncmp(str, "MCA", 3) == 0) {
+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
+ } else {
+ printk(KERN_ERR "Unknown bustype %s\n", str);
+ }
+}
+
+/**
+ * Parses an MP table IO APIC entry.
+ */
+static void __init
+MP_ioapic_info(struct mpc_config_ioapic *m)
+{
+ if (!(m->mpc_flags & MPC_APIC_USABLE)) {
+ printk(KERN_DEBUG "Encountered unusable APIC, ignoring it.\n");
+ return;
+ }
+
+ printk(KERN_DEBUG "I/O APIC #%d Version %d at 0x%X\n",
+ m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
+ if (nr_ioapics >= MAX_IO_APICS) {
+ printk(KERN_ERR "Max # of I/O APICs (%d) exceeded (found %d).\n",
+ MAX_IO_APICS, nr_ioapics);
+		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
+ }
+ if (!m->mpc_apicaddr) {
+ printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
+ " found in MP table, skipping!\n");
+ return;
+ }
+ mp_ioapics[nr_ioapics] = *m;
+ nr_ioapics++;
+
+ ioapic_id[ioapic_num] = m->mpc_apicid;
+ ioapic_phys_addr[ioapic_num] = m->mpc_apicaddr;
+ ioapic_num++;
+}
+
+/**
+ * Parses an MP table IRQ source entry.
+ */
+static void __init
+MP_intsrc_info(struct mpc_config_intsrc *m)
+{
+ mp_irqs [mp_irq_entries] = *m;
+ printk(KERN_DEBUG
+ "Int: type %d, pol %d, trig %d, bus %d,"
+ " IRQ %02x, APIC ID %x, APIC INT %02x\n",
+ m->mpc_irqtype, m->mpc_irqflag & 3,
+ (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
+ m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
+ if (++mp_irq_entries >= MAX_IRQ_SOURCES)
+ panic("Max # of irq sources exceeded!!\n");
+}
+
+/**
+ * Parses an MP table LINT entry.
+ */
+static void __init
+MP_lintsrc_info (struct mpc_config_lintsrc *m)
+{
+ printk(KERN_DEBUG
+ "Lint: type %d, pol %d, trig %d, bus %d,"
+ " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
+ m->mpc_irqtype, m->mpc_irqflag & 3,
+ (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
+ m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
+ /*
+ * Well it seems all SMP boards in existence
+ * use ExtINT/LVT1 == LINT0 and
+ * NMI/LVT2 == LINT1 - the following check
+	 * will show us if this assumption is false.
+ * Until then we do not have to add baggage.
+ */
+ if ((m->mpc_irqtype == mp_ExtINT) &&
+ (m->mpc_destapiclint != 0))
+ BUG();
+ if ((m->mpc_irqtype == mp_NMI) &&
+ (m->mpc_destapiclint != 1))
+ BUG();
+}
+
+/**
+ * Parses the input MP table, storing various bits of information in global
+ * variables as it goes.
+ */
+static int __init
+read_mpc(struct mp_config_table *mpc)
+{
+ char str[16];
+ int count=sizeof(*mpc);
+ unsigned char *mpt=((unsigned char *)mpc)+count;
+
+ if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) {
+ printk(KERN_ERR "SMP mptable: bad signature [%c%c%c%c]!\n",
+ mpc->mpc_signature[0],
+ mpc->mpc_signature[1],
+ mpc->mpc_signature[2],
+ mpc->mpc_signature[3]);
+ return -1;
+ }
+ if (mpf_checksum((unsigned char *)mpc, mpc->mpc_length)) {
+ printk(KERN_ERR "SMP mptable: checksum error!\n");
+ return -1;
+ }
+ if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
+ printk(KERN_ERR "SMP mptable: bad table version (%d)!\n",
+ mpc->mpc_spec);
+ return -1;
+ }
+ if (!mpc->mpc_lapic) {
+ printk(KERN_ERR "SMP mptable: null local APIC address!\n");
+ return -1;
+ }
+ memcpy(str, mpc->mpc_oem, 8);
+ str[8]=0;
+ printk(KERN_DEBUG " OEM ID: %s\n", str);
+
+ memcpy(str, mpc->mpc_productid, 12);
+ str[12]=0;
+ printk(KERN_DEBUG " Product ID: %s\n", str);
+
+ printk(KERN_DEBUG " APIC at: 0x%X\n", mpc->mpc_lapic);
+
+ /* Save the local APIC address, it might be non-default. */
+ lapic_phys_addr = mpc->mpc_lapic;
+
+ /* Now process all of the configuration blocks in the table. */
+ while (count < mpc->mpc_length) {
+ switch(*mpt) {
+ case MP_PROCESSOR:
+ {
+ struct mpc_config_processor *m=
+ (struct mpc_config_processor *)mpt;
+ MP_processor_info(m);
+ mpt += sizeof(*m);
+ count += sizeof(*m);
+ break;
+ }
+ case MP_BUS:
+ {
+ struct mpc_config_bus *m=
+ (struct mpc_config_bus *)mpt;
+ MP_bus_info(m);
+ mpt += sizeof(*m);
+ count += sizeof(*m);
+ break;
+ }
+ case MP_IOAPIC:
+ {
+ struct mpc_config_ioapic *m=
+ (struct mpc_config_ioapic *)mpt;
+ MP_ioapic_info(m);
+ mpt+=sizeof(*m);
+ count+=sizeof(*m);
+ break;
+ }
+ case MP_INTSRC:
+ {
+ struct mpc_config_intsrc *m=
+ (struct mpc_config_intsrc *)mpt;
+
+ MP_intsrc_info(m);
+ mpt+=sizeof(*m);
+ count+=sizeof(*m);
+ break;
+ }
+ case MP_LINTSRC:
+ {
+ struct mpc_config_lintsrc *m=
+ (struct mpc_config_lintsrc *)mpt;
+ MP_lintsrc_info(m);
+ mpt+=sizeof(*m);
+ count+=sizeof(*m);
+ break;
+ }
+ }
+ }
+ //clustered_apic_check();
+ if (!num_cpus)
+ printk(KERN_ERR "SMP mptable: no CPUs registered!\n");
+ return 0;
+}
+
+/**
+ * Determines the multiprocessor configuration.
+ * The configuration information is stored in global variables so nothing is
+ * returned. find_mp_config() must be called before this function.
+ */
+void __init
+get_mp_config(void)
+{
+ struct intel_mp_floating *mpf = mpf_found;
+ if (!mpf) {
+ printk(KERN_WARNING "Assuming 1 CPU.\n");
+ num_cpus = 1;
+ /* Assign the only CPU logical=physical ID 0 */
+ cpu_set(0, cpu_present_map);
+ physid_set(0, phys_cpu_present_map);
+ cpu_info[0].logical_id = 0;
+ cpu_info[0].physical_id = 0;
+ cpu_info[0].arch.apic_id = 0;
+ return;
+ }
+
+ printk(KERN_DEBUG "Intel MultiProcessor Specification v1.%d\n",
+ mpf->mpf_specification);
+ if (mpf->mpf_feature2 & (1<<7)) {
+ printk(KERN_DEBUG " IMCR and PIC compatibility mode.\n");
+ pic_mode = 1;
+ } else {
+ printk(KERN_DEBUG " Virtual Wire compatibility mode.\n");
+ pic_mode = 0;
+ }
+
+ /*
+ * We don't support the default MP configuration.
+ * All supported multi-CPU systems must provide a full MP table.
+ */
+ if (mpf->mpf_feature1 != 0)
+ BUG();
+ if (!mpf->mpf_physptr)
+ BUG();
+
+ /*
+ * Set this early so we don't allocate CPU0 if the
+ * MADT list doesn't list the bootstrap processor first.
+ * The bootstrap processor has to be logical ID 0... which
+ * we are reserving here.
+ */
+ cpu_set(0, cpu_present_map);
+
+ /*
+ * Parse the MP configuration
+ */
+ if (read_mpc(phys_to_virt(mpf->mpf_physptr)))
+ panic("BIOS bug, MP table errors detected! (tell your hw vendor)\n");
+}
+
+/**
+ * Scans a region of memory for the MP table.
+ */
+static int __init
+scan(unsigned long base, unsigned long length)
+{
+ unsigned int *bp = phys_to_virt(base);
+ struct intel_mp_floating *mpf;
+
+ printk(KERN_DEBUG "Scan for MP table from 0x%p for %ld bytes\n",
+ bp, length);
+
+ while (length > 0) {
+ mpf = (struct intel_mp_floating *)bp;
+ if ((*bp == SMP_MAGIC_IDENT) &&
+ (mpf->mpf_length == 1) &&
+ !mpf_checksum((unsigned char *)bp, 16) &&
+ ((mpf->mpf_specification == 1)
+ || (mpf->mpf_specification == 4)) ) {
+
+ reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
+ if (mpf->mpf_physptr)
+ reserve_bootmem(mpf->mpf_physptr, PAGE_SIZE);
+ mpf_found = mpf;
+ printk(KERN_DEBUG "Found MP table at: 0x%p\n", mpf);
+ return 1;
+ }
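+		/* bp is a 32-bit pointer, so bp += 4 advances 16 bytes, matching length -= 16 */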
+ bp += 4;
+ length -= 16;
+ }
+ return 0;
+}
+
+/**
+ * Locates the MP table, if there is one.
+ * This does not parse the MP table... get_mp_config() does that.
+ */
+void __init
+find_mp_config(void)
+{
+ /*
+ * 1) Scan the bottom 1K for a signature
+ * 2) Scan the top 1K of base RAM
+ * 3) Scan the 64K of bios
+ */
+ if (scan(0x0,0x400) ||
+ scan(639*0x400,0x400) ||
+ scan(0xF0000,0x10000))
+ return;
+
+ /*
+ * If it is an MP machine we should know now.
+ *
+ * If not, make a final effort and scan the
+ * Extended BIOS Data Area.
+ *
+ * NOTE! There are Linux loaders that will corrupt the EBDA
+ * area, and as such this kind of MP config may be less
+ * trustworthy, simply because the MP table may have been
+ * stomped on during early boot. These loaders are buggy and
+ * should be fixed.
+ */
+ if (scan(ebda_addr, 0x1000)) {
+ printk(KERN_WARNING "MP table found in EBDA\n");
+ return;
+ }
+
+ /* If we have come this far, we did not find an MP table */
+ printk(KERN_DEBUG "No MP table found.\n");
+}
+
--- /dev/null
+obj-$(CONFIG_CRAY_XT) += l0rca.o
--- /dev/null
+/* -*- mode: c; c-basic-offset: 8; -*-
+ */
+/*
+ * RCA: Interface between CRAY RCA and linux kernel
+ *
+ * Copyright (c) 2003 Cray Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *
+ */
+/*
+ * This code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+
+#include <lwk/kernel.h>
+#include <lwk/string.h>
+#include <lwk/errno.h>
+#include <lwk/delay.h>
+#include <lwk/version.h>
+#include <rca/rca_l0_linux.h>
+#include <rca/rca_l0.h>
+extern void set_debug_traps(void) ; /* GDB routine */
+
+#if defined(CONFIG_CRAY_RS_DEBUG)
+#define assert(expr) \
+if(!(expr)) { \
+ printk( "Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr,__FILE__,__FUNCTION__,__LINE__); \
+}
+#else
+#define assert(expr)
+#endif /* CONFIG_CRAY_RS_DEBUG */
+
+/* States for the up channel. Used for calling the tx_done callback only if required */
+#define CHAN_TX_STATE_AVAIL (0)
+#define CHAN_TX_STATE_FULL (1)
+#define GET_CHAN_STATE(x) ((x)->state)
+#define SET_CHAN_STATE(x, s) ((x)->state = (s))
+
+typedef struct l0rca_mapped_ch {
+ uint32_t num_obj; /* number of objects */
+ uint32_t intr_bit;
+ volatile uint32_t *ch_ridx; /* Pointer to ridx */
+ volatile uint32_t *ch_widx; /* Pointer to widx */
+ rs_event_t *ch_buf_ptr; /* Pointer to channel buffer */
+ l0rca_down_handle_t down_handler;
+ l0rca_up_handle_t up_handler;
+ uint32_t reg_count;
+ int poll; /* timeout */
+	int tshhld;	/* threshold */
+ int state; /* outgoing chan full or not */
+} l0rca_mapped_ch_t;
+
+#define L0RCA_INITVAL (0xAA55F00D)
+
+typedef struct l0rca_mapped_config {
+ uint32_t version; /* config version */
+ rs_node_t proc_id; /* node id */
+ int32_t proc_num; /* cpu number (0-3) */
+ l0rca_mapped_ch_t ch_data[NUM_L0RCA_CHANNELS];
+ volatile uint32_t *l0rca_l0_intr_ptr; /* interrupt to L0 */
+ uint32_t initialized;
+} l0rca_mapped_config_t;
+
+/* Our copy with virt addrs; so we can directly access the config area */
+l0rca_mapped_config_t l0rca_early_cfg = {0};
+
+/* Pointer to the actual config struct in Seastar RAM shared memory */
+l0rca_config_t *l0rca_cfg = NULL;
+
+/* Store the size of the event header without the msg body */
+uint32_t rs_ev_hdr_sz;
+
+
+void send_intr_to_l0(l0rca_mapped_ch_t *ch)
+{
+ volatile uint32_t *addr = &(((l0rca_intr_t *)(l0rca_early_cfg.l0rca_l0_intr_ptr))->l0r_intr_set);
+ SSPUT32(addr, (ch)->intr_bit);
+}
+
+/*
+ * Function: l0rca_event_data
+ *
+ * Description: Return a pointer to the data portion and length of the
+ * data portion of the event.
+ * NB: Reflect any changes to l0rca_event_data in gdb_l0rca_event_data
+ *
+ * Arguments: rs_event_t *evp IN: Event whose data is of interest
+ * void **data OUT: Upon return will point to data portion of event
+ * int32_t *len OUT: Upon return will have the length of the data
+ * portion of the event
+ *
+ * Returns: No Return Value.
+ * Note: The main purpose of this routine is to read in the data portion of the
+ * event from Seastar memory.
+ */
+void l0rca_event_data(rs_event_t *evp, void **data, int32_t *len)
+{
+ /* Get length and data portion from the event */
+ *len = evp->ev_len;
+ *data = &evp->ev_data;
+} /* l0rca_event_data */
+
+/*
+ * Function: l0rca_get_proc_id
+ *
+ * Description: Return the node/processor id.
+ *
+ * Arguments: None
+ *
+ * Returns: The proc id.
+ */
+rs_node_t l0rca_get_proc_id(void)
+{
+ return l0rca_cfg->proc_id;
+}
+
+/*
+ * Function: l0rca_get_proc_num
+ *
+ * Description: Returns this processor's CPU number, 0-3.
+ *
+ * Arguments: None.
+ *
+ * Returns: The CPU number of this node.
+ */
+int l0rca_get_proc_num(void)
+{
+ int tmp;
+ SSGET32(&(l0rca_cfg->proc_num),tmp);
+ return(tmp);
+}
+
+/*
+ * Function: set_chan_tx_state
+ *
+ * Description: Looks at the read and write indices to determine if the
+ * channel is full or not. Sets state accordingly.
+ * NB: Reflect any changes to set_chan_tx_state in gdb_set_chan_tx_state.
+ *
+ * Arguments: l0rca_mapped_ch_t *ch_ptr IN: Pointer to channel being set.
+ *
+ * Returns: None
+ */
+static inline void set_chan_tx_state(l0rca_mapped_ch_t *ch_ptr)
+{
+ volatile uint32_t wx, rx;
+ int not_full;
+ SSGET32(ch_ptr->ch_widx, wx);
+ SSGET32(ch_ptr->ch_ridx, rx);
+ not_full = (wx - rx) < ch_ptr->num_obj;
+ SET_CHAN_STATE(ch_ptr, not_full ? CHAN_TX_STATE_AVAIL : CHAN_TX_STATE_FULL);
+}
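+
+/*
+ * Illustrative example (added commentary, values are made up): with
+ * num_obj = 8, widx = 12 and ridx = 5 the channel holds 12 - 5 = 7 events,
+ * which is less than num_obj, so the state becomes CHAN_TX_STATE_AVAIL.
+ * With widx = 13 and ridx = 5 the difference equals num_obj and the channel
+ * is marked CHAN_TX_STATE_FULL. The unsigned subtraction remains correct
+ * even after widx wraps around while ridx has not.
+ */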
+
+
+/*
+ * Function: l0rca_init_config
+ *
+ * Description: Read L0 - RCA communication config structure and populate
+ * our personal copy. If there is any error, the OS panics
+ * since not being able to communicate with L0 is a total disaster.
+ * If already initialized then returns silently.
+ *
+ * Arguments: None.
+ *
+ * Returns: None
+ */
+
+void l0rca_init_config(void)
+{
+ rs_event_t *ev_buf;
+ int i;
+ volatile uint64_t tmp64;
+ volatile uint32_t tmp32;
+
+ /*
+ * Check if already initialized; No locking is needed as this
+ * is called during the boot process from within the kernel (as
+ * opposed from a driver)
+ */
+ if (L0RCA_INITVAL == l0rca_early_cfg.initialized)
+ return;
+
+ l0rca_cfg = (l0rca_config_t *)rca_l0_comm_va(L0_SIC_RAM);
+
+ /* TODO - first order of business is to check the Version Number */
+ /* Also, should we panic if version mismatch? */
+#ifdef CONFIG_CRAY_RS_DEBUG
+ printk ("l0 config at virtual %p\n", l0rca_cfg);
+ printk ("Phys event bufs 0x%llx intr reg 0x%llx\n",
+ l0rca_cfg->l0rca_buf_addr,
+ l0rca_cfg->l0rca_l0_intr_addr);
+#endif /* CONFIG_CRAY_RS_DEBUG */
+
+ /* convert event buffer address from physical to virtual */
+ SSGET64(&(l0rca_cfg->l0rca_buf_addr),tmp64);
+ ev_buf = (rs_event_t *)rca_l0_comm_va(tmp64);
+
+ /* convert intr reg address from physical to virtual */
+ SSGET64(&(l0rca_cfg->l0rca_l0_intr_addr), tmp64);
+ l0rca_early_cfg.l0rca_l0_intr_ptr = (uint32_t *)rca_l0_comm_va(tmp64);
+
+#ifdef CONFIG_CRAY_RS_DEBUG
+ printk ("event bufs %p intr reg %p\n", ev_buf,
+ l0rca_early_cfg.l0rca_l0_intr_ptr);
+#endif /* CONFIG_CRAY_RS_DEBUG */
+
+ /* Now setup the channel buffers */
+ for (i = 0; i < NUM_L0RCA_CHANNELS; i++)
+ {
+ int num_obj;
+ SSGET32(&(l0rca_cfg->chnl_data[i].num_obj), tmp32);
+ num_obj = tmp32;
+
+ /* Skip if channel is unused */
+ if (!num_obj)
+ continue;
+
+ /* Ensure num_obj is a power of 2 */
+ if (num_obj & (num_obj - 1))
+ {
+#if 0
+ panic ("l0rca_init_config: num_obj[%u] for channel %d is not power of 2\n",
+ num_obj, i);
+#endif
+ }
+ l0rca_early_cfg.ch_data[i].num_obj = num_obj;
+ SSGET32(&(l0rca_cfg->chnl_data[i].l0_intr_bit), tmp32);
+ l0rca_early_cfg.ch_data[i].intr_bit = tmp32;
+
+		/* Point to the read/write indexes */
+ l0rca_early_cfg.ch_data[i].ch_ridx = &l0rca_cfg->chnl_data[i].ridx;
+ l0rca_early_cfg.ch_data[i].ch_widx = &l0rca_cfg->chnl_data[i].widx;
+ l0rca_early_cfg.ch_data[i].ch_buf_ptr = ev_buf;
+
+ ev_buf += num_obj;
+
+#ifdef CONFIG_CRAY_RS_DEBUG
+ printk ("Buffer %p for channel %d rd %u wr %u\n",
+ l0rca_early_cfg.ch_data[i].ch_buf_ptr, i,
+ l0rca_cfg->chnl_data[i].ridx,
+ l0rca_cfg->chnl_data[i].widx);
+#endif /* CONFIG_CRAY_RS_DEBUG */
+ } /* End of for */
+
+ /* Set the remaining fields */
+ SSGET32(&(l0rca_cfg->version), l0rca_early_cfg.version);
+ SSGET32(&(l0rca_cfg->proc_id), l0rca_early_cfg.proc_id);
+ SSGET32(&(l0rca_cfg->proc_num), l0rca_early_cfg.proc_num);
+
+ /* Assumes ev_data is the last element TODO: this should be
+ * defined via a macro in the rs_event_t structure definition */
+ rs_ev_hdr_sz = offsetof(rs_event_t, ev_data);
+
+	/* Indicate we have initialized the mapped copy */
+ l0rca_early_cfg.initialized = L0RCA_INITVAL;
+
+ return;
+}
+
+/*
+ * Function: register_ch_up
+ *
+ * Description: Register function for the upload channel. It is expected that
+ * there be at most one registered user for an upload channel. This user
+ * provides a callback to be invoked when the buffer drains below tshhld
+ * (only if the buffer became full last time the tshhld was crossed)
+ *
+ * Arguments: int ch_num IN: channel number to register on
+ * l0rca_up_handle_t handler IN: callback routine
+ * int tshhld IN: buffer to drain before invoking callback; ignored
+ * if poll is negative.
+ * int poll IN: if > zero - duration in ms to check for buffer drain
+ * if = zero - tx done interrupt invokes callback (TODO)
+ * if < zero - do nothing. It is assumed that the user (TODO)
+ * has her own means to check for buffer drain
+ *
+ * Returns: -EBUSY - If another user is already registered.
+ * -EINVAL - if ch_num is not in range.
+ * zero (SUCCESS) otherwise.
+ *
+ * Note: It is expected that the buffer is empty upon call to this routine.
+ * This should be true on system startup and after an unregister call.
+ *
+ * Note: As of 12/1/04, the polling is forced, and is hard coded in
+ * l0rca_linux.c. Thus, the tshhld and poll params have no meaning.
+ */
+int
+register_ch_up(int ch_num, l0rca_up_handle_t handler, int tshhld, int poll)
+{
+ volatile uint32_t wx, rx;
+ l0rca_mapped_ch_t *ch_ptr = &l0rca_early_cfg.ch_data[ch_num];
+
+ if (NUM_L0RCA_CHANNELS <= ch_num)
+ return -EINVAL;
+
+ /* Allow only one user per channel */
+ if (ch_ptr->reg_count)
+ return -EBUSY;
+
+ SSGET32(ch_ptr->ch_widx, wx);
+ SSGET32(ch_ptr->ch_ridx, rx);
+ assert(wx == rx);
+
+ SET_CHAN_STATE(ch_ptr, CHAN_TX_STATE_AVAIL);
+ ch_ptr->down_handler = NULL; /* Clear the down handler */
+ ch_ptr->reg_count++;
+ ch_ptr->poll = poll;
+ ch_ptr->up_handler = handler;
+ ch_ptr->tshhld = tshhld;
+
+ return 0;
+}
+
+/*
+ * Function: register_ch_down
+ *
+ * Description: Register function for the download channel. It is expected that
+ * there be at most one registered user for a download channel. This user
+ * provides a callback to be invoked when data from L0 arrives on the channel.
+ *
+ * Arguments: int ch_num IN: channel number to register on
+ * l0rca_down_handle_t handler IN: callback routine
+ * int poll IN: if > zero - duration in ms to check for event
+ * if = zero - event arrival is interrupt driven.
+ * if < zero - do nothing. It is assumed that the user
+ * has her own means to check for event arrival.
+ *
+ * Returns: EBUSY - If another user is already registered.
+ * zero (SUCCESS) otherwise.
+ *
+ * Note: As of 12/1/04, the polling is forced, and is hard coded in
+ * l0rca_linux.c. Thus, the poll parameter has no meaning.
+ */
+int register_ch_down(int ch_num, l0rca_down_handle_t handler, int poll)
+{
+ l0rca_mapped_ch_t *ch_ptr = &l0rca_early_cfg.ch_data[ch_num];
+
+ if (NUM_L0RCA_CHANNELS <= ch_num)
+ return -EINVAL;
+
+ /* Allow only one user per channel */
+ if (ch_ptr->reg_count)
+ return -EBUSY;
+
+ ch_ptr->reg_count++;
+ ch_ptr->down_handler = handler;
+ ch_ptr->poll = poll;
+
+#if 0
+ /* Do any OS specific initialization e.g. set irq, timers etc. */
+ if (l0rca_os_init())
+ {
+ panic ("Unable to initialize OS service for L0RCA interface\n");
+ }
+#endif
+
+ return 0;
+}
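+
+/*
+ * Usage sketch (added commentary; the handler names are hypothetical): a
+ * client normally claims both directions of a channel pair before use,
+ * for example:
+ *
+ *	if (register_ch_up(L0RCA_CH_KGDB_UP, my_tx_done, 0, 0) == 0)
+ *		register_ch_down(L0RCA_CH_KGDB_DOWN, my_rx_handler, 0);
+ *
+ * l0_gdb_init() later in this file follows the same pattern, passing NULL
+ * handlers since kernel GDB polls the channels directly.
+ */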
+
+#ifdef CONFIG_CRAY_RS_DEBUG
+uint32_t l0rca_buf_full;
+#endif /* CONFIG_CRAY_RS_DEBUG */
+/*
+ * Function: ch_send_data
+ *
+ * Description: Sends data towards the L0.
+ * The data that buf points to is sent as the payload in an rs_event structure.
+ * The header is a separate parameter and the send routine directly copies
+ * the header and the data into the circular buffer, thus avoiding a copy.
+ *
+ * NB: Reflect any changes to ch_send_data in gdb_ch_send_data
+ *
+ * Arguments: int ch_num IN: channel number on which to send data
+ * rs_event_t *ev_hdr IN: Header without len & timestamp
+ * void* buf IN: Buffer with data
+ * int len IN: length of data to transfer
+ *
+ * Returns: -EINVAL - if no user registered on the channel (Debug only)
+ * -EFAULT - if buf or ev_hdr is NULL (Debug only)
+ * -E2BIG - if len exceeds max event payload (RS_MSG_LEN) (Debug only)
+ * zero - SUCCESS, all bytes sent.
+ * > 0 - Bytes not sent. Sender should retry.
+ *
+ * Notes: data in buf will be copied to the channel buffer, therefore, upon
+ * return, user can free the buf. buf may not be NULL and the len must
+ * not be zero. Use ch_send_event to send events with zero length data
+ * portion.
+ */
+
+int ch_send_data(int ch_num, const rs_event_t *ev_hdr,
+ void* buf, unsigned int len)
+{
+ l0rca_mapped_ch_t *ch_ptr = &l0rca_early_cfg.ch_data[ch_num];
+ uint32_t ev_len = 0;
+ volatile uint32_t tmpwx, tmprx;
+
+#ifdef CONFIG_CRAY_RS_DEBUG
+ uint32_t wr, rd, no_events;
+ if ((NULL == ev_hdr) || (NULL == buf))
+ {
+ return -EFAULT;
+ }
+
+ if ((NUM_L0RCA_CHANNELS <= ch_num) || (0 == len) ||
+ (L0RCA_INITVAL != l0rca_early_cfg.initialized) ||
+ (0 == ch_ptr->reg_count))
+ {
+ return -EINVAL;
+ }
+
+ /* Normalize the write & read indexes */
+ SSGET32(ch_ptr->ch_widx, tmpwx);
+ wr = tmpwx & (ch_ptr->num_obj - 1);
+ SSGET32(ch_ptr->ch_ridx, tmprx);
+ rd = tmprx & (ch_ptr->num_obj - 1);
+
+
+ /* Calculate the number of events we will be sending */
+ no_events = (len + RS_MSG_LEN -1)/RS_MSG_LEN;
+ if ((wr + (no_events) - rd) < ch_ptr->num_obj)
+ {
+ l0rca_buf_full++;
+ }
+
+#endif /* CONFIG_CRAY_RS_DEBUG */
+
+ /* Optimize for buf not full and only one event needed to send */
+
+ /*
+ * Masking of indexes is not needed for the check in while() below.
+ * Both widx & ridx are unsigned and increase monotonically such that
+ * ridx cannot lag behind widx by more than the circular buf size i.e.
+ * num_obj. widx will overflow before ridx.
+ * 'widx - ridx' will always yield the difference between the two
+ * even if widx has overflowed and ridx has not yet overflowed.
+ */
+ SSGET32(ch_ptr->ch_widx, tmpwx);
+ SSGET32(ch_ptr->ch_ridx, tmprx);
+ while ((tmpwx - tmprx) < ch_ptr->num_obj)
+ {
+ rs_event_t *wr_ev_ptr =
+ &ch_ptr->ch_buf_ptr[tmpwx & (ch_ptr->num_obj - 1)];
+
+ /* Copy same header for each event */
+ SSMEMPUT(wr_ev_ptr, (uint32_t*) ev_hdr, rs_ev_hdr_sz);
+
+ /* Copy the data portion over */
+ ev_len = (RS_MSG_LEN > len) ? len : RS_MSG_LEN;
+ SSPUT32(&(wr_ev_ptr->ev_len), ev_len);
+ SSMEMPUT((char*)wr_ev_ptr + rs_ev_hdr_sz, buf, RS_MSG_LEN);
+
+ /* TODO: Set the timestamp in each event */
+#if 0
+ SSPUT32(wr_ev_ptr->_ev_stp, 0x0);
+#endif /* 0 */
+ len -= ev_len;
+ /*
+ * After updating the widx, DO NOT access any field in that
+ * event. Though not desirable, the reader is free to alter
+ * fields in the event.
+ */
+ SSGET32(ch_ptr->ch_widx, tmpwx);
+ SSPUT32(ch_ptr->ch_widx, tmpwx + 1);
+ set_chan_tx_state(ch_ptr);
+
+ /* Let L0 know an ev is available */
+ send_intr_to_l0(ch_ptr);
+
+ if (0 == len)
+ break;
+
+ buf = (void *)(((char *)buf) + RS_MSG_LEN);
+
+ SSGET32(ch_ptr->ch_widx, tmpwx);
+ SSGET32(ch_ptr->ch_ridx, tmprx);
+ } /* End of while */
+ return len; /* bytes remaining, if any */
+}
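+
+/*
+ * Usage note (added commentary): a positive return value is the number of
+ * bytes that did not fit, so a caller that must deliver everything simply
+ * retries with the remainder; putDebugPacket() below does this with the
+ * gdb_ clone of this routine.
+ */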
+
+/*
+ * Function: ch_send_event
+ *
+ * Description: Sends an event to L0. An event with zero length data portion
+ * is supported.
+ *
+ * Arguments: int ch_num IN: channel number on which to send the event
+ * const rs_event_t *evp IN: Event to send
+ *
+ * Returns: -EINVAL - if no user registered on the channel (Debug only)
+ * -EFAULT - if evp is NULL (Debug only)
+ * zero - SUCCESS, event sent.
+ * +EBUSY - Event not sent. Sender should retry.
+ *
+ * Notes: The event will be copied to the channel buffer, therefore, upon
+ * return, user may free the space associated with the event
+ */
+int ch_send_event(int ch_num, const rs_event_t *evp)
+{
+ int ret = 0;
+ l0rca_mapped_ch_t *ch_ptr = &l0rca_early_cfg.ch_data[ch_num];
+ volatile uint32_t tmpwx, tmprx;
+
+#ifdef CONFIG_CRAY_RS_DEBUG
+ if (NULL == evp)
+ {
+ return -EFAULT;
+ }
+
+ if ((NUM_L0RCA_CHANNELS <= ch_num) ||
+ (L0RCA_INITVAL != l0rca_early_cfg.initialized) ||
+ (0 == ch_ptr->reg_count))
+ {
+ return -EINVAL;
+ }
+#endif /* CONFIG_CRAY_RS_DEBUG */
+
+ /* Optimize for circular buffer not full */
+
+ /*
+ * Masking of indexes is not needed for the check in while() below.
+ * Both widx & ridx are unsigned and increase monotonically such that
+ * ridx cannot lag behind widx by more than the circular buf size i.e.
+ * num_obj. widx will overflow before ridx.
+ * 'widx - ridx' will always yield the difference between the two
+ * even if widx has overflowed and ridx has not yet overflowed.
+ */
+ SSGET32(ch_ptr->ch_widx,tmpwx);
+ SSGET32(ch_ptr->ch_ridx,tmprx);
+ if((tmpwx - tmprx) < ch_ptr->num_obj)
+ {
+ rs_event_t *wr_ev_ptr =
+ &ch_ptr->ch_buf_ptr[tmpwx & (ch_ptr->num_obj - 1)];
+
+ /* Copy header & data length for the event */
+ SSMEMPUT(wr_ev_ptr, (uint32_t*)evp, rs_ev_hdr_sz + evp->ev_len);
+
+ SSGET32(ch_ptr->ch_widx, tmpwx);
+ SSPUT32(ch_ptr->ch_widx, tmpwx + 1);
+ set_chan_tx_state(ch_ptr);
+
+ /* Let L0 know an ev is available */
+ send_intr_to_l0(ch_ptr);
+ } else
+ {
+ ret = EBUSY;
+ }
+
+ return ret;
+}
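+
+/*
+ * Usage sketch (added commentary; the retry policy is only an example): a
+ * caller that must not drop the event can back off and retry on EBUSY:
+ *
+ *	while (ch_send_event(ch, &ev) == EBUSY)
+ *		udelay(1000);
+ */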
+
+/*
+ * Function: l0rca_ch_get_event
+ *
+ * Description: Read an event from L0 (if any). If an event is available then
+ * the read pointer is advanced.
+ * NB: Reflect any changes to l0rca_ch_get_event in gdb_l0rca_ch_get_event
+ *
+ * Arguments: int ch_num IN: channel number from which to read the event
+ * rs_event_t *evp IN: Buffer to place the event
+ *
+ * Returns: -EINVAL - if no user registered on the channel (Debug only)
+ * -EFAULT - if evp is NULL (Debug only)
+ * zero - no event to receive at this time
+ * > 0 - Event received
+ *
+ */
+int l0rca_ch_get_event(int ch_num, rs_event_t *evp)
+{
+ l0rca_mapped_ch_t *ch_ptr = &l0rca_early_cfg.ch_data[ch_num];
+ int ret = 0;
+ uint32_t nbytes;
+ volatile uint32_t tmpwx, tmprx;
+
+#ifdef CONFIG_CRAY_RS_DEBUG
+ if (NULL == evp)
+ return -EFAULT;
+
+ if ((NUM_L0RCA_CHANNELS <= ch_num) ||
+ (L0RCA_INITVAL != l0rca_early_cfg.initialized) ||
+ (0 == ch_ptr->reg_count))
+ return -EINVAL;
+#endif /* CONFIG_CRAY_RS_DEBUG */
+
+ SSGET32(ch_ptr->ch_widx,tmpwx);
+ SSGET32(ch_ptr->ch_ridx,tmprx);
+ if(tmpwx != tmprx)
+ {
+ rs_event_t *wr_ev_ptr =
+ &ch_ptr->ch_buf_ptr[tmprx & (ch_ptr->num_obj - 1)];
+
+ /* Copy over the event */
+ SSGET32(&(wr_ev_ptr->ev_len), nbytes);
+ SSMEMGET(evp, (uint32_t*)wr_ev_ptr, nbytes + rs_ev_hdr_sz);
+
+ /* Update the rd index */
+ SSGET32(ch_ptr->ch_ridx, tmprx);
+ SSPUT32(ch_ptr->ch_ridx, tmprx + 1);
+
+ /* Let L0 know that the event has been drained */
+ send_intr_to_l0(ch_ptr);
+
+ ret = 1;
+ }
+
+ return ret;
+}
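+
+/*
+ * Usage note (added commentary): a poll-driven consumer calls this in a
+ * loop and treats a return of 0 as "nothing pending";
+ * l0rca_kgdb_down_getc() below is a minimal example of the pattern.
+ */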
+
+/*
+ * Function: unregister_ch_down
+ *
+ * Description: Unregister function for the download channel. Used to indicate
+ * that the channel is no longer to be used. The read & write pointers are
+ * equalized to make the circ buffer empty.
+ *
+ * Arguments: int ch_num IN: channel number to unregister
+ *
+ * Returns: -EINVAL - if ch_num is not correct or no user registered
+ * zero (SUCCESS)
+ */
+int unregister_ch_down(int ch_num)
+{
+ l0rca_mapped_ch_t *ch_ptr = &l0rca_early_cfg.ch_data[ch_num];
+ volatile uint32_t tmpwx;
+
+ if (NUM_L0RCA_CHANNELS <= ch_num)
+ return -EINVAL;
+
+ /* Check if user is using this channel */
+ if (!ch_ptr->reg_count)
+ return -EINVAL;
+
+ ch_ptr->down_handler = NULL;
+ ch_ptr->reg_count--;
+
+ /* Equalize the read & write pointers i.e. drain the circ buffer */
+ /* NOTE: We cannot really stop the L0 from sending data */
+ SSGET32(ch_ptr->ch_widx, tmpwx);
+ SSPUT32(ch_ptr->ch_ridx, tmpwx);
+
+ return 0;
+}
+
+/*
+ * Function: unregister_ch_up
+ *
+ * Description: Unregister function for the upload channel. Used to indicate
+ * that the channel is no longer to be used.
+ *
+ * Arguments: int ch_num IN: channel number to unregister
+ *
+ * Returns: -EINVAL - if ch_num is not correct or no user registered
+ * zero (SUCCESS)
+ */
+int unregister_ch_up(int ch_num)
+{
+ l0rca_mapped_ch_t *ch_ptr = &l0rca_early_cfg.ch_data[ch_num];
+ volatile uint32_t tmpwx, tmprx;
+
+ if (NUM_L0RCA_CHANNELS <= ch_num)
+ return -EINVAL;
+
+ /* Check if user is using this channel */
+ if (!ch_ptr->reg_count)
+ return -EINVAL;
+
+ /* Wait for events to be drained by the L0 */
+ SSGET32(ch_ptr->ch_widx,tmpwx);
+ SSGET32(ch_ptr->ch_ridx,tmprx);
+ while(tmpwx != tmprx)
+ {
+ udelay(1000);
+ SSGET32(ch_ptr->ch_widx,tmpwx);
+ SSGET32(ch_ptr->ch_ridx,tmprx);
+ }
+
+ ch_ptr->up_handler = NULL;
+ ch_ptr->reg_count--;
+
+ return 0;
+}
+
+/* TODO: If we decide to use the PKT_MODE register to indicate the channels
+ * that need attention then use PKT_MODE instead of looking at each channel.
+ * TODO: Currently the rx_done callback is invoked for each event in the
+ * incoming channel. Change this to be called with all the pending events.
+ * Note that because the channel is a circular buffer, if the pending events
+ * wrap around the callback needs to be invoked twice - once with events
+ * up to the "end" of the circular buffer and then with events starting from the
+ * "start" of the circular buffer.
+ * TODO: To prevent thrashing of the tx_done callback in case the circular
+ * buffer is being operated under almost full condition, obey the threshold
+ * specified at the registration. The tx_done callback is only called once the
+ * circular buffer occupancy is below the specified threshold.
+ */
+/*
+ * Function: l0rca_poll_callback
+ *
+ * Description: Scan the incoming channels and call the receive callback
+ * (if any) in case an event is pending to be processed.
+ * Update the read pointer. Next scan the outgoing channels
+ * and if the channel was full, call the transmit done callback
+ * so that events may be sent.
+ *
+ * Arguments: None
+ *
+ * Returns: 0 if no events were processed, else 1.
+ *
+ * Note: It is possible that this routine is called from interrupt
+ * context. The callbacks invoked *must* not block.
+ */
+
+int l0rca_poll_callback(void)
+{
+ int i;
+ int rval = 0;
+ rs_event_t ev, *wr_ev_ptr;
+ volatile uint32_t tmpwx, tmprx;
+
+ /* Loop through all channels with incoming events */
+ for (i = 1; i < NUM_L0RCA_CHANNELS; i+=2)
+ {
+ l0rca_mapped_ch_t *ch_ptr = &l0rca_early_cfg.ch_data[i];
+
+ /* GDB handled separately.
+ */
+ if (i == L0RCA_CH_KGDB_DOWN)
+ continue;
+
+ if ((0 == ch_ptr->reg_count) || (NULL == ch_ptr->down_handler))
+ continue;
+
+ if ((ch_ptr->ch_ridx == 0) || (ch_ptr->ch_widx == 0)) {
+ continue;
+ }
+
+ if (!ch_ptr->num_obj) {
+ SSGET32(ch_ptr->ch_widx, tmpwx);
+ SSGET32(ch_ptr->ch_ridx, tmprx);
+ continue;
+ }
+
+ SSGET32(ch_ptr->ch_widx, tmpwx);
+ SSGET32(ch_ptr->ch_ridx, tmprx);
+ if (tmpwx != tmprx)
+ {
+ wr_ev_ptr = (rs_event_t*)&ch_ptr->ch_buf_ptr[tmprx & (ch_ptr->num_obj - 1)];
+ /* read the entire event */
+ SSMEMGET((uint32_t*)&ev, (uint32_t*)wr_ev_ptr, rs_sizeof_event(RS_MSG_LEN));
+
+ /* Call callback routine with one event */
+ (ch_ptr->down_handler)(i, &ev, 1);
+
+ /* We are done with this event */
+ rval++;
+ SSGET32(ch_ptr->ch_ridx, tmprx);
+ SSPUT32(ch_ptr->ch_ridx, tmprx + 1);
+
+ /* Let L0 know that the event has been drained */
+ send_intr_to_l0(ch_ptr);
+ } /* End of if */
+ } /* End of for incoming channels */
+
+ /* Loop through all channels with outgoing events */
+ for (i = 0; i < NUM_L0RCA_CHANNELS; i+=2)
+ {
+ l0rca_mapped_ch_t *ch_ptr = &l0rca_early_cfg.ch_data[i];
+
+ /* GDB handled separately.
+ */
+ if (i == L0RCA_CH_KGDB_UP)
+ continue;
+
+ if(NULL != ch_ptr->up_handler)
+ {
+ /* Lock needed for mutex with tx routine */
+ LOCK_CHANNEL(i);
+ if (CHAN_TX_STATE_FULL == GET_CHAN_STATE(ch_ptr))
+ {
+ /* Call the callback if chan no longer full */
+ assert(0 != ch_ptr->reg_count);
+ SSGET32(ch_ptr->ch_widx, tmpwx);
+ SSGET32(ch_ptr->ch_ridx, tmprx);
+ if((tmpwx - tmprx) < ch_ptr->num_obj)
+ {
+ rval++;
+ SET_CHAN_STATE(ch_ptr,CHAN_TX_STATE_AVAIL);
+ UNLOCK_CHANNEL(i);
+ (ch_ptr->up_handler)(i);
+ LOCK_CHANNEL(i);
+ }
+ } /* End of if */
+ UNLOCK_CHANNEL(i);
+ }
+ } /* End of for */
+
+ return rval;
+}
+
+#if defined(CONFIG_CRAY_XT_KGDB) || defined(CONFIG_CRAY_KGDB)
+/* Kernel mode GDB via L0 interface routines.
+ *
+ * Note that this code was derived from gdbl0.c,
+ * which was derived from Linux gdbserial.c,
+ * which contains no copyright notice. Parts
+ * of this may need to be rewritten if a GPL
+ * copyright notice was removed from gdbserial.c
+ *
+ */
+
+#define LRPRINTF printk
+
+#undef PRNT /* define for debug printing */
+
+#define GDB_BUF_SIZE 512 /* power of 2, please */
+
+static char gdb_buf[GDB_BUF_SIZE] ;
+static int gdb_buf_in_inx ;
+static int gdb_buf_in_cnt ;
+static int gdb_buf_out_inx ;
+
+static int initialized = -1;
+
+int gdb_store_overflow;
+int gdb_read_error;
+
+/* Preset this with constant fields in the event header */
+static rs_event_t l0rca_gdb_ev_template = {0};
+
+/*
+ * Function: rcal0_gdb_template
+ *
+ * Description: Hand-crafts an event header to be sent with each outgoing event
+ *
+ * Arguments: None.
+ *
+ * Returns: None
+ *
+ * Note: The len & timestamp are not filled in.
+ */
+static void rcal0_gdb_template(void)
+{
+ l0rca_gdb_ev_template.ev_id = ec_kgdb_output;
+ l0rca_gdb_ev_template.ev_gen = RCA_MKSVC(RCA_INST_ANY,
+ RCA_SVCTYPE_TEST0, l0rca_get_proc_id());
+ l0rca_gdb_ev_template.ev_src = l0rca_gdb_ev_template.ev_gen;
+ l0rca_gdb_ev_template.ev_priority = RCA_LOG_DEBUG;
+ l0rca_gdb_ev_template.ev_flag = 0; /* For Debugging */
+
+	/* Timestamp, len & data are filled in when the event is sent */
+}
+
+/*
+ * Function: l0_gdb_init
+ *
+ * Description: Sets up the L0 RCA channels used for kernel-mode GDB:
+ *              reads the L0 configuration, builds the outgoing event
+ *              template, and registers the KGDB up and down channels.
+ *
+ * Arguments: None.
+ *
+ * Returns: zero (SUCCESS) or a negative errno from channel registration.
+ */
+int l0_gdb_init(void)
+{
+
+ int ret;
+
+ /* RCA already initialized on Q
+ */
+ /* Read the configuration information provided by L0 */
+ l0rca_init_config();
+
+ /* Setup the Event template to use for outgoing events */
+ rcal0_gdb_template();
+
+ /* Set up channel internal state by calling
+ * registration routines.
+ */
+
+ /* Register with the KGDB out channel to send gdb data */
+ ret = register_ch_up (L0RCA_CH_KGDB_UP, NULL, 0, 0);
+
+ if (!ret)
+ {
+ /* Register with the KGDB in channel to receive gdb commands */
+ ret = register_ch_down(L0RCA_CH_KGDB_DOWN, NULL, 0);
+ }
+
+ return ret;
+}
+
+extern void breakpoint(void);
+
+
+int gdb_hook(void)
+{
+ int retval;
+
+ /*
+ * Call GDB routine to setup the exception vectors for the debugger.
+ */
+
+ /* Setup both kgdb channels */
+ retval = l0_gdb_init();
+
+ /* TODO: on Linux the call to printk in this
+	 * routine no longer generates output.
+ *
+ * Did something change in the setup of the
+ * console channel?
+ */
+ if (retval == 0)
+ {
+ initialized = 1;
+ } else
+ {
+ initialized = 0;
+ LRPRINTF("gdb_hook: l0_gdb_init() failed: %d\n", retval);
+ return (-1);
+ }
+ return 0;
+
+} /* gdb_hook */
+
+/*
+ * Function: l0rca_kgdb_down_getc()
+ *
+ * Description: Checks for an event on the KGDB DOWN L0 channel
+ * and returns the first character; other characters
+ * are thrown away. Used to detect Control-C for
+ * breakpointing the kernel. Returns 0 when no input
+ * is available or on error, in which case gdb_read_error
+ * is incremented.
+ */
+int l0rca_kgdb_down_getc(void)
+{
+ char *chp;
+ int ret, len;
+ rs_event_t ev = {0};
+
+ if ((ret = l0rca_ch_get_event(L0RCA_CH_KGDB_DOWN, &ev)) <= 0) {
+ if (ret < 0)
+ gdb_read_error++;
+ return 0;
+ }
+ l0rca_event_data(&ev, (void *)&chp, &len);
+ if (len > 0)
+ return *chp;
+ return 0;
+} /* l0rca_kgdb_down_getc */
+
+/* Only routines used by the Kernel trap mode KGDB interface
+ * should follow this point.
+ *
+ * These functions should only call other `gdb_.*' functions.
+ *
+ * This is best done by examining the assembly file and
+ * ensuring that all assembly call statements only call
+ * routines that match `gdb_.*', in all of the routines
+ * that follow.
+ *
+ * NB: SETTING BREAKPOINTS IN THE FOLLOWING ROUTINES MAY
+ * BREAK THE KERNEL DEBUGGER.
+ */
+
+/*
+ * Function: gdb_l0rca_event_data
+ *
+ * Description: Clone of l0rca_event_data, to be called only by kernel GDB.
+ *              Returns a pointer to the data portion of the event and its length.
+ *
+ * Arguments: rs_event_t *evp IN: Event whose data is of interest
+ * void **data OUT: Upon return will point to data portion of event
+ * int32_t *len OUT: Upon return will have the length of the data
+ * portion of the event
+ *
+ * Returns: No Return Value.
+ */
+void gdb_l0rca_event_data(rs_event_t *evp, void **data, int32_t *len)
+{
+ /* Get length and data portion from the event */
+ *len = evp->ev_len;
+ *data = &evp->ev_data;
+} /* gdb_l0rca_event_data */
+
+/*
+ * Function: gdb_set_chan_tx_state
+ *
+ * Description: Clone of set_chan_tx_state to only be called by kernel GDB.
+ */
+static inline void gdb_set_chan_tx_state(l0rca_mapped_ch_t *ch_ptr)
+{
+
+ int not_full = (*ch_ptr->ch_widx - *ch_ptr->ch_ridx) < ch_ptr->num_obj;
+
+ SET_CHAN_STATE(ch_ptr, not_full ? CHAN_TX_STATE_AVAIL : CHAN_TX_STATE_FULL);
+}
+
+static void gdb_memcpy(void *dest, const void *src, int cnt)
+{
+ int i;
+ char *pd = dest;
+ const char *ps = src;
+
+ for (i = 0; i < cnt; i++)
+ pd[i] = ps[i];
+}
+
+/*
+ * Function: gdb_ch_send_data
+ *
+ * Description: Clone of ch_send_data to be only called by kernel GDB.
+ *
+ */
+int gdb_ch_send_data(int ch_num, const rs_event_t *ev_hdr,
+ void* buf, unsigned int len)
+{
+ l0rca_mapped_ch_t *ch_ptr = &l0rca_early_cfg.ch_data[ch_num];
+
+ /* No registration checks needed.
+ */
+
+ /* Optimize for buf not full and only one event needed to send */
+
+ /*
+ * Masking of indexes is not needed for the check in while() below.
+ * Both widx & ridx are unsigned and increase monotonically such that
+ * ridx cannot lag behind widx by more than the circular buf size i.e.
+ * num_obj. widx will overflow before ridx.
+ * 'widx - ridx' will always yield the difference between the two
+ * even if widx has overflowed and ridx has not yet overflowed.
+ */
+ while((*ch_ptr->ch_widx - *ch_ptr->ch_ridx) < ch_ptr->num_obj)
+ {
+ rs_event_t *wr_ev_ptr =
+ &ch_ptr->ch_buf_ptr[*ch_ptr->ch_widx & (ch_ptr->num_obj - 1)];
+
+ /* Copy same header for each event */
+ gdb_memcpy(wr_ev_ptr, (void *) ev_hdr, rs_ev_hdr_sz);
+
+ /* Copy the data portion over */
+ wr_ev_ptr->ev_len = (RS_MSG_LEN > len) ? len : RS_MSG_LEN;
+ gdb_memcpy((char *)wr_ev_ptr + rs_ev_hdr_sz, buf,
+ wr_ev_ptr->ev_len);
+
+ /* TODO: Set the timestamp in each event */
+#if 0
+ wr_ev_ptr->_ev_stp = 0x0;
+#endif /* 0 */
+
+ len -= wr_ev_ptr->ev_len;
+ /*
+ * After updating the widx, DO NOT access any field in that
+ * event. Though not desirable, the reader is free to alter
+ * fields in the event.
+ */
+ (*ch_ptr->ch_widx)++;
+ gdb_set_chan_tx_state(ch_ptr);
+
+ /* Let L0 know an ev is available */
+ send_intr_to_l0(ch_ptr);
+
+ if (0 == len)
+ break;
+
+ buf += RS_MSG_LEN;
+ } /* End of while */
+
+ return len; /* bytes remaining, if any */
+}
+
+/*
+ * Function: gdb_l0rca_ch_get_event
+ *
+ * Description: Clone of l0rca_ch_get_event, to be called only by kernel GDB.
+ *              If an event is available it is copied out and the read pointer is advanced.
+ *
+ */
+int gdb_l0rca_ch_get_event(int ch_num, rs_event_t *evp)
+{
+ l0rca_mapped_ch_t *ch_ptr = &l0rca_early_cfg.ch_data[ch_num];
+ int ret = 0;
+
+ /* No registration checks needed
+ */
+
+ if(*ch_ptr->ch_widx != *ch_ptr->ch_ridx)
+ {
+ rs_event_t *wr_ev_ptr =
+ &ch_ptr->ch_buf_ptr[*ch_ptr->ch_ridx & (ch_ptr->num_obj - 1)];
+
+ /* Copy over the event */
+ gdb_memcpy(evp, (void *)wr_ev_ptr, wr_ev_ptr->ev_len+rs_ev_hdr_sz);
+
+ /* Update the rd index */
+ (*ch_ptr->ch_ridx)++;
+
+ /* Let L0 know that the event has been drained */
+ send_intr_to_l0(ch_ptr);
+
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/*
+ * Function: gdb_store_char_in_buf
+ *
+ * Description: Check for overflow and place the incoming character into
+ *		the local buffer for later retrieval.
+ *
+ * Arguments: int ch IN: Incoming character
+ *
+ * Returns: zero - (SUCCESS)
+ * -1 - Buffer Overflow
+ */
+static int gdb_store_char_in_buf(char ch)
+{
+ if (gdb_buf_in_cnt >= GDB_BUF_SIZE)
+ { /* buffer overflow, clear it */
+ gdb_buf_in_inx = 0 ;
+ gdb_buf_in_cnt = 0 ;
+ gdb_buf_out_inx = 0 ;
+ return -1;
+ }
+
+ gdb_buf[gdb_buf_in_inx++] = ch;
+ gdb_buf_in_inx &= (GDB_BUF_SIZE - 1) ;
+ gdb_buf_in_cnt++;
+
+ return 0;
+}
+
+/*
+ * Wait until the interface can accept a char, then write it.
+ */
+static void gdb_write_char(char chr)
+{
+ int ret;
+
+ while ((ret = gdb_ch_send_data(L0RCA_CH_KGDB_UP, &l0rca_gdb_ev_template,
+ (void *)&chr, sizeof(char))) > 0)
+ {
+ /* Buffer full; keep trying.... */
+ ;
+ } /* End of while */
+
+ return;
+} /* gdb_write_char */
+
+/*
+ * gdb_getc
+ *
+ * This is a GDB stub routine. It waits for a character from the
+ * L0 interface and then returns it.
+ */
+int gdb_getc(int wait)
+{
+ char *chp, *end_buf;
+ int ret, len;
+ rs_event_t ev = {0};
+
+#ifdef PRNT
+ LRPRINTF("gdb_getc:") ;
+#endif
+ /* First check if the receive callback has any chars pending */
+ if (gdb_buf_in_cnt == 0)
+ {
+ /* No chars from rx_callback; Loop until a char is available */
+ while ((ret = gdb_l0rca_ch_get_event(L0RCA_CH_KGDB_DOWN, &ev))
+ <= 0)
+ {
+ if (ret < 0)
+ {
+ /* Error!! This is death */
+ gdb_read_error++;
+ return -1;
+ }
+ if (! wait)
+ return 0;
+ } /* End of while */
+
+ /* Get the data in the event */
+ gdb_l0rca_event_data(&ev, (void *)&chp, &len);
+
+ /* We have an event; fill the local buffer */
+ for (end_buf = chp+len; chp < end_buf; chp++)
+ {
+ if (gdb_store_char_in_buf(*chp) < 0)
+ {
+ gdb_store_overflow++;
+ }
+ } /* End of for */
+ } /* End of if */
+
+ /* There should be something for us in the local buffer now */
+ chp = &gdb_buf[gdb_buf_out_inx++] ;
+ gdb_buf_out_inx &= (GDB_BUF_SIZE - 1) ;
+ gdb_buf_in_cnt--;
+
+#ifdef PRNT
+ LRPRINTF("%c\n", *chp > ' ' && *chp < 0x7F ? *chp : ' ') ;
+#endif
+ return(*chp) ;
+
+} /* gdb_getc */
+
+/*
+ * gdb_putc
+ *
+ * This is a GDB stub routine. It waits until the interface is ready
+ * to transmit a char and then sends it. If there is no serial
+ * interface connection then it simply returns to its caller, having
+ * pretended to send the char.
+ */
+int gdb_putc(char chr)
+{
+#ifdef PRNT
+ LRPRINTF("gdb_putc: chr=%02x '%c'\n", chr,
+ chr > ' ' && chr < 0x7F ? chr : ' ') ;
+#endif
+
+ gdb_write_char(chr); /* this routine will wait */
+
+ return 1;
+
+} /* gdb_putc */
+
+int putDebugPacket(char *buf, int n)
+{
+ int ret = -1;
+
+ /* Loop sending the data */
+ while (n)
+ {
+ if ((ret = gdb_ch_send_data(L0RCA_CH_KGDB_UP, &l0rca_gdb_ev_template,
+ (void *)buf, n)) <= 0) {
+ /* Either error or we are done */
+ break;
+ }
+
+ if (n > ret) {
+ /* Some bytes were sent, point to the remaining data */
+ buf += (n - ret);
+ n = ret;
+ }
+ }
+
+ return ret;
+}
+#endif /* CONFIG_CRAY_KGDB */
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/init.h>
+#include <lwk/resource.h>
+#include <arch/page.h>
+#include <arch/io.h>
+#include <arch/sections.h>
+#include <arch/e820.h>
+
+/**
+ * Standard PC resources.
+ */
+struct resource standard_io_resources[] = {
+ { .name = "dma1", .start = 0x00, .end = 0x1f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "pic1", .start = 0x20, .end = 0x21,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "timer0", .start = 0x40, .end = 0x43,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "timer1", .start = 0x50, .end = 0x53,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "keyboard", .start = 0x60, .end = 0x6f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "dma page reg", .start = 0x80, .end = 0x8f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "pic2", .start = 0xa0, .end = 0xa1,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "dma2", .start = 0xc0, .end = 0xdf,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "fpu", .start = 0xf0, .end = 0xff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO }
+};
+
+#define STANDARD_IO_RESOURCES \
+ (sizeof standard_io_resources / sizeof standard_io_resources[0])
+
+#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
+
+struct resource data_resource = {
+ .name = "Kernel data",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_RAM,
+};
+struct resource code_resource = {
+ .name = "Kernel code",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_RAM,
+};
+
+#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
+
+static struct resource system_rom_resource = {
+ .name = "System ROM",
+ .start = 0xf0000,
+ .end = 0xfffff,
+ .flags = IORESOURCE_ROM,
+};
+
+static struct resource extension_rom_resource = {
+ .name = "Extension ROM",
+ .start = 0xe0000,
+ .end = 0xeffff,
+ .flags = IORESOURCE_ROM,
+};
+
+static struct resource adapter_rom_resources[] = {
+ { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
+ .flags = IORESOURCE_ROM },
+ { .name = "Adapter ROM", .start = 0, .end = 0,
+ .flags = IORESOURCE_ROM },
+ { .name = "Adapter ROM", .start = 0, .end = 0,
+ .flags = IORESOURCE_ROM },
+ { .name = "Adapter ROM", .start = 0, .end = 0,
+ .flags = IORESOURCE_ROM },
+ { .name = "Adapter ROM", .start = 0, .end = 0,
+ .flags = IORESOURCE_ROM },
+ { .name = "Adapter ROM", .start = 0, .end = 0,
+ .flags = IORESOURCE_ROM }
+};
+
+#define ADAPTER_ROM_RESOURCES \
+ (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
+
+static struct resource video_rom_resource = {
+ .name = "Video ROM",
+ .start = 0xc0000,
+ .end = 0xc7fff,
+ .flags = IORESOURCE_ROM,
+};
+
+static struct resource video_ram_resource = {
+ .name = "Video RAM area",
+ .start = 0xa0000,
+ .end = 0xbffff,
+ .flags = IORESOURCE_RAM,
+};
+
+#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
+
+static int __init
+romchecksum(unsigned char *rom, unsigned long length)
+{
+ unsigned char *p, sum = 0;
+
+ for (p = rom; p < rom + length; p++)
+ sum += *p;
+ return sum == 0;
+}
+
+static void __init
+probe_roms(void)
+{
+ unsigned long start, length, upper;
+ unsigned char *rom;
+ int i;
+
+ /* video rom */
+ upper = adapter_rom_resources[0].start;
+ for (start = video_rom_resource.start; start < upper; start += 2048) {
+ rom = isa_bus_to_virt(start);
+ if (!romsignature(rom))
+ continue;
+
+ video_rom_resource.start = start;
+
+ /* 0 < length <= 0x7f * 512, historically */
+ length = rom[2] * 512;
+
+ /* if checksum okay, trust length byte */
+ if (length && romchecksum(rom, length))
+ video_rom_resource.end = start + length - 1;
+
+ request_resource(&iomem_resource, &video_rom_resource);
+ break;
+ }
+
+ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
+ if (start < upper)
+ start = upper;
+
+ /* system rom */
+ request_resource(&iomem_resource, &system_rom_resource);
+ upper = system_rom_resource.start;
+
+ /* check for extension rom (ignore length byte!) */
+ rom = isa_bus_to_virt(extension_rom_resource.start);
+ if (romsignature(rom)) {
+ length = extension_rom_resource.end - extension_rom_resource.start + 1;
+ if (romchecksum(rom, length)) {
+ request_resource(&iomem_resource, &extension_rom_resource);
+ upper = extension_rom_resource.start;
+ }
+ }
+
+ /* check for adapter roms on 2k boundaries */
+ for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
+ rom = isa_bus_to_virt(start);
+ if (!romsignature(rom))
+ continue;
+
+ /* 0 < length <= 0x7f * 512, historically */
+ length = rom[2] * 512;
+
+ /* but accept any length that fits if checksum okay */
+ if (!length || start + length > upper || !romchecksum(rom, length))
+ continue;
+
+ adapter_rom_resources[i].start = start;
+ adapter_rom_resources[i].end = start + length - 1;
+ request_resource(&iomem_resource, &adapter_rom_resources[i]);
+
+ start = adapter_rom_resources[i++].end & ~2047UL;
+ }
+}
+
+void __init
+init_resources(void)
+{
+ unsigned int i;
+
+ code_resource.start = virt_to_phys(&_text);
+ code_resource.end = virt_to_phys(&_etext)-1;
+ data_resource.start = virt_to_phys(&_etext);
+ data_resource.end = virt_to_phys(&_edata)-1;
+
+ /*
+ * Request address space for all standard RAM and ROM resources
+ * and also for regions reported as reserved by the e820.
+ */
+ probe_roms();
+ e820_reserve_resources();
+
+ request_resource(&iomem_resource, &video_ram_resource);
+
+ /* request I/O space for devices used on all i[345]86 PCs */
+ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
+ request_resource(&ioport_resource, &standard_io_resources[i]);
+}
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/smp.h>
+#include <lwk/task.h>
+#include <arch/i387.h>
+
+struct task_struct *
+__arch_context_switch(struct task_struct *prev_p, struct task_struct *next_p)
+{
+ struct thread_struct *prev = &prev_p->arch.thread;
+ struct thread_struct *next = &next_p->arch.thread;
+ id_t cpu = this_cpu;
+ struct tss_struct *tss = &per_cpu(tss, cpu);
+
+ /* Update TSS */
+ tss->rsp0 = next->rsp0;
+
+ /* Switch DS and ES segment registers */
+ asm volatile("mov %%es,%0" : "=m" (prev->es));
+ if (unlikely(next->es | prev->es))
+ loadsegment(es, next->es);
+ asm volatile("mov %%ds,%0" : "=m" (prev->ds));
+ if (unlikely(next->ds | prev->ds))
+ loadsegment(ds, next->ds);
+
+ /* Load FS and GS segment registers (used for thread local storage) */
+ {
+ unsigned int fsindex;
+ asm volatile("movl %%fs,%0" : "=r" (fsindex));
+ if (unlikely(fsindex | next->fsindex | prev->fs)) {
+ loadsegment(fs, next->fsindex);
+ if (fsindex)
+ prev->fs = 0;
+ }
+ if (next->fs)
+ wrmsrl(MSR_FS_BASE, next->fs);
+ prev->fsindex = fsindex;
+ }
+ {
+ unsigned int gsindex;
+ asm volatile("movl %%gs,%0" : "=r" (gsindex));
+ if (unlikely(gsindex | next->gsindex | prev->gs)) {
+ load_gs_index(next->gsindex);
+ if (gsindex)
+ prev->gs = 0;
+ }
+ if (next->gs)
+ wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
+ prev->gsindex = gsindex;
+ }
+
+ /* Update the CPU's PDA (per-CPU data area) */
+ write_pda(pcurrent, next_p);
+
+ /* If necessary, save and restore floating-point state */
+ if (prev_p->arch.flags & TF_USED_FPU)
+ fpu_save_state(prev_p);
+ if (next_p->arch.flags & TF_USED_FPU) {
+ clts();
+ fpu_restore_state(next_p);
+ } else {
+ /*
+ * Set the TS flag of CR0 so that FPU/MMX/SSE instructions
+ * will cause a "Device not available" exception. The exception
+ * handler will then initialize the FPU state and set the
+ * task's TF_USED_FPU flag. From that point on, the task
+ * should never experience another "Device not available"
+ * exception.
+ */
+ stts();
+ }
+
+ return prev_p;
+}
+
+void
+arch_idle_task_loop_body(void)
+{
+ /* Issue HALT instruction,
+ * which should put CPU in a lower power mode */
+ halt();
+}
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/init.h>
+#include <lwk/cpuinfo.h>
+#include <lwk/bootmem.h>
+#include <lwk/smp.h>
+#include <arch/bootsetup.h>
+#include <arch/e820.h>
+#include <arch/page.h>
+#include <arch/sections.h>
+#include <arch/proto.h>
+#include <arch/mpspec.h>
+#include <arch/pda.h>
+#include <arch/io_apic.h>
+
+/**
+ * Bitmap of PTE/PMD entry flags that are supported.
+ * This is AND'ed with a PTE/PMD entry before it is installed.
+ */
+unsigned long __supported_pte_mask __read_mostly = ~0UL;
+
+/**
+ * Bitmap of features enabled in the CR4 register.
+ */
+unsigned long mmu_cr4_features;
+
+/**
+ * Start and end addresses of the initrd image.
+ */
+paddr_t __initdata initrd_start;
+paddr_t __initdata initrd_end;
+
+/**
+ * The init_task ELF image.
+ */
+paddr_t __initdata init_elf_image;
+
+/**
+ * Base address and size of the Extended BIOS Data Area.
+ */
+paddr_t __initdata ebda_addr;
+size_t __initdata ebda_size;
+#define EBDA_ADDR_POINTER 0x40E
+
+/**
+ * Finds the address and length of the Extended BIOS Data Area.
+ */
+static void __init
+discover_ebda(void)
+{
+ /*
+ * There is a real-mode segmented pointer pointing to the
+ * 4K EBDA area at 0x40E
+ */
+ ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
+ ebda_addr <<= 4;
+
+ ebda_size = *(unsigned short *)__va(ebda_addr);
+
+ /* Round EBDA up to pages */
+ if (ebda_size == 0)
+ ebda_size = 1;
+ ebda_size <<= 10;
+ ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
+ if (ebda_size > 64*1024)
+ ebda_size = 64*1024;
+}
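+
+/*
+ * Worked example (added commentary, illustrative values): if the word at
+ * 0x40E is 0x9FC0 the EBDA starts at 0x9FC00. If the first word of the EBDA
+ * reports 1 (KB), ebda_size becomes 1 << 10 = 1024 bytes, which is then
+ * rounded up to whole pages and clamped to at most 64 KB.
+ */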
+
+/**
+ * This sets up the bootstrap memory allocator. It is a simple
+ * bitmap-based allocator that tracks memory at page granularity.
+ * Once the bootstrap process is complete, each unallocated page
+ * is added to the real memory allocator's free pool. Memory allocated
+ * during bootstrap remains allocated forever, unless explicitly
+ * freed before turning things over to the real memory allocator.
+ */
+static void __init
+setup_bootmem_allocator(
+ unsigned long start_pfn,
+ unsigned long end_pfn
+)
+{
+ unsigned long bootmap_size, bootmap;
+
+ bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
+ bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
+ if (bootmap == -1L)
+ panic("Cannot find bootmem map of size %ld\n",bootmap_size);
+ bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
+ e820_bootmem_free(0, end_pfn << PAGE_SHIFT);
+ reserve_bootmem(bootmap, bootmap_size);
+}
+
+/**
+ * Mark in-use memory regions as reserved.
+ * This prevents the bootmem allocator from allocating them.
+ */
+static void __init
+reserve_memory(void)
+{
+ /* Reserve the kernel page table memory */
+ reserve_bootmem(table_start << PAGE_SHIFT,
+ (table_end - table_start) << PAGE_SHIFT);
+
+ /* Reserve kernel memory */
+ reserve_bootmem(__pa_symbol(&_text),
+ __pa_symbol(&_end) - __pa_symbol(&_text));
+
+	/* Reserve physical page 0... it's often a special BIOS page */
+ reserve_bootmem(0, PAGE_SIZE);
+
+ /* Reserve the Extended BIOS Data Area memory */
+ if (ebda_addr)
+ reserve_bootmem(ebda_addr, ebda_size);
+
+ /* Reserve SMP trampoline */
+ reserve_bootmem(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
+
+ /* Find and reserve boot-time SMP configuration */
+ find_mp_config();
+
+ /* Reserve memory used by the initrd image */
+ if (LOADER_TYPE && INITRD_START) {
+ if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
+ printk(KERN_DEBUG
+ "reserving memory used by initrd image\n");
+ printk(KERN_DEBUG
+ " INITRD_START=0x%lx, INITRD_SIZE=%ld bytes\n",
+ (unsigned long) INITRD_START,
+ (unsigned long) INITRD_SIZE);
+ reserve_bootmem(INITRD_START, INITRD_SIZE);
+ initrd_start = INITRD_START;
+ initrd_end = initrd_start+INITRD_SIZE;
+ init_elf_image = initrd_start;
+ } else {
+ printk(KERN_ERR
+ "initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ (unsigned long)(INITRD_START + INITRD_SIZE),
+ (unsigned long)(end_pfn << PAGE_SHIFT));
+ initrd_start = 0;
+ }
+ }
+}
+
+/**
+ * This initializes a per-CPU area for each CPU.
+ *
+ * TODO: The PDA and per-CPU areas are pretty tightly wound. It should be
+ * possible to make the per-CPU area *be* the PDA, or put another way,
+ * point %GS at the per-CPU area rather than the PDA. All of the PDA's
+ * current contents would become normal per-CPU variables.
+ */
+static void __init
+setup_per_cpu_areas(void)
+{
+ int i;
+ size_t size;
+
+ /*
+ * There is an ELF section containing all per-CPU variables
+ * surrounded by __per_cpu_start and __per_cpu_end symbols.
+ * We create a copy of this ELF section for each CPU.
+ */
+ size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
+
+ for_each_cpu_mask (i, cpu_present_map) {
+ char *ptr;
+
+ ptr = alloc_bootmem_aligned(size, PAGE_SIZE);
+ if (!ptr)
+ panic("Cannot allocate cpu data for CPU %d\n", i);
+
+ /*
+ * Pre-bias data_offset by subtracting its offset from
+ * __per_cpu_start. Later, per_cpu() will calculate a
+ * per_cpu variable's address with:
+ *
+ * addr = offset_in_percpu_ELF_section + data_offset
+ * = (__per_cpu_start + offset) + (ptr - __per_cpu_start)
+ * = offset + ptr
+ */
+ cpu_pda(i)->data_offset = ptr - __per_cpu_start;
+
+ memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+ }
+}
+
+static inline int get_family(int cpuid)
+{
+ int base = (cpuid>>8) & 0xf;
+ int extended = (cpuid>>20) &0xff;
+
+ return (0xf == base) ? base + extended : base;
+}
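+
+/*
+ * Worked example (added commentary): for a CPUID value with base family 0x6
+ * the function returns 6 directly; for base family 0xf and extended family
+ * 0x1 it returns 0xf + 0x1 = 0x10, per the x86 extended-family encoding.
+ */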
+
+/**
+ * Architecture specific initialization.
+ * This is called from start_kernel() in init/main.c.
+ *
+ * NOTE: Ordering is usually important. Do not move things
+ * around unless you know what you are doing.
+ */
+void __init
+setup_arch(void)
+{
+ /*
+ * Figure out which memory regions are usable and which are reserved.
+ * This builds the "e820" map of memory from info provided by the
+ * BIOS.
+ */
+ setup_memory_region();
+
+ /*
+ * Get the bare minimum info about the bootstrap CPU... the
+	 * one we're executing on right now. Later on, the full
+ * boot_cpu_data and cpu_info[boot_cpu_id] structures will be
+ * filled in completely.
+ */
+ boot_cpu_data.logical_id = 0;
+ early_identify_cpu(&boot_cpu_data);
+
+ /*
+ * Find the Extended BIOS Data Area.
+ * (Not sure why exactly we need this, probably don't.)
+ */
+ discover_ebda();
+
+ /*
+ * Initialize the kernel page tables.
+ * The kernel page tables map an "identity" map of all physical memory
+ * starting at virtual address PAGE_OFFSET. When the kernel executes,
+ * it runs inside of the identity map... memory below PAGE_OFFSET is
+ * from whatever task was running when the kernel got invoked.
+ */
+ init_kernel_pgtables(0, (end_pfn_map << PAGE_SHIFT));
+
+ /*
+ * Initialize the bootstrap dynamic memory allocator.
+ * alloc_bootmem() will work after this.
+ */
+ setup_bootmem_allocator(0, end_pfn);
+ reserve_memory();
+
+ /*
+ * Get the multiprocessor configuration...
+ * number of CPUs, PCI bus info, APIC info, etc.
+ */
+ get_mp_config();
+
+ /*
+ * Initialize resources. Resources reserve sections of normal memory
+ * (iomem) and I/O ports (ioport) for devices and other system
+ * resources. For each resource type, there is a tree which tracks
+	 * which regions are in use. This eliminates the possibility of
+ * conflicts... e.g., two devices trying to use the same iomem region.
+ */
+ init_resources();
+
+ /*
+ * Initialize per-CPU areas, one per CPU.
+ * Variables defined with DEFINE_PER_CPU() end up in the per-CPU area.
+ * This provides a mechanism for different CPUs to refer to their
+ * private copy of the variable using the same name
+ * (e.g., get_cpu_var(foo)).
+ */
+ setup_per_cpu_areas();
+
+ /*
+ * Initialize the IDT table and interrupt handlers.
+ */
+ interrupts_init();
+
+ /*
+ * Map the APICs into the kernel page tables.
+ *
+ * Each CPU has its own Local APIC. All Local APICs are memory mapped
+ * to the same virtual address region. A CPU accesses its Local APIC by
+ * accessing the region. A CPU cannot access another CPU's Local APIC.
+ *
+ * Each Local APIC is connected to all IO APICs in the system. Each IO
+ * APIC is mapped to a different virtual address region. A CPU accesses
+ * a given IO APIC by accessing the appropriate region. All CPUs can
+ * access all IO APICs.
+ */
+ lapic_map();
+ ioapic_map();
+
+ /*
+ * Initialize the virtual system call code/data page.
+ * The vsyscall page is mapped into every task's address space at a
+ * well-known address. User code can call functions in this page
+ * directly, providing a light-weight mechanism for read-only system
+ * calls such as gettimeofday().
+ */
+ vsyscall_map();
+
+ cpu_init();
+
+ current->cpumask = cpu_present_map;
+
+ ioapic_init();
+
+ lapic_set_timer(1000000000);
+}
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/task.h>
+#include <lwk/ptrace.h>
+#include <lwk/version.h>
+#include <lwk/kallsyms.h>
+#include <arch/msr.h>
+
+/**
+ * Prints a nicely formatted address to the console.
+ * This attempts to look up the symbolic name of the address.
+ */
+void
+printk_address(unsigned long address)
+{
+ unsigned long offset = 0, symsize;
+ const char *symname;
+ char namebuf[128];
+
+ symname = kallsyms_lookup(address, &symsize, &offset, namebuf);
+ if (!symname) {
+ printk(" [<%016lx>]\n", address);
+ return;
+ }
+ printk(" [<%016lx>] %s+0x%lx/0x%lx\n",
+ address, symname, offset, symsize);
+}
+
+
+/**
+ * Print a stack trace of the context.
+ */
+void
+kstack_trace(
+ void * rbp_v
+)
+{
+#ifndef CONFIG_FRAME_POINTER
+ printk( "Unable to generate stack trace "
+ "(recompile with CONFIG_FRAME_POINTER)\n" );
+ return;
+#endif
+
+ uint64_t * rbp = rbp_v;
+ if( rbp == 0 )
+ asm( "mov %%rbp, %0" : "=r"(rbp) );
+
+ int max_depth = 16;
+ printk( "Stack trace from RBP %p\n", rbp );
+
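+	/*
+	 * With frame pointers enabled, each frame holds the caller's saved
+	 * RBP at rbp[0] and the return address at rbp[1], so following *rbp
+	 * walks up the call chain.
+	 */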
+ while( rbp && max_depth-- )
+ {
+ printk_address( rbp[1] );
+ rbp = (uint64_t*) *rbp;
+ }
+}
+
+
+/**
+ * Prints x86_64 general purpose registers and friends to the console.
+ * NOTE: This prints the CPU register values contained in the passed in
+ * 'struct pt_regs *'. It DOES NOT print the current values in
+ * the CPU's registers.
+ */
+void
+show_registers(struct pt_regs * regs)
+{
+ unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
+ unsigned int fsindex, gsindex;
+ unsigned int ds, cs, es;
+ bool user_fault = (regs->rip < PAGE_OFFSET);
+ char namebuf[128];
+
+ printk("Task ID: %d Task Name: %s UTS_RELEASE: %s\n",
+ current->id, current->name, UTS_RELEASE);
+ printk("RIP: %04lx:%016lx (%s)\n", regs->cs & 0xffff, regs->rip,
+ (user_fault) ? "user-context"
+ : kallsyms_lookup(regs->rip, NULL, NULL, namebuf));
+ printk("RSP: %04lx:%016lx EFLAGS: %08lx ERR: %08lx\n",
+ regs->ss, regs->rsp, regs->eflags, regs->orig_rax);
+ printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
+ regs->rax, regs->rbx, regs->rcx);
+ printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
+ regs->rdx, regs->rsi, regs->rdi);
+ printk("RBP: %016lx R08: %016lx R09: %016lx\n",
+ regs->rbp, regs->r8, regs->r9);
+ printk("R10: %016lx R11: %016lx R12: %016lx\n",
+ regs->r10, regs->r11, regs->r12);
+ printk("R13: %016lx R14: %016lx R15: %016lx\n",
+ regs->r13, regs->r14, regs->r15);
+
+ asm("movl %%ds,%0" : "=r" (ds));
+ asm("movl %%cs,%0" : "=r" (cs));
+ asm("movl %%es,%0" : "=r" (es));
+ asm("movl %%fs,%0" : "=r" (fsindex));
+ asm("movl %%gs,%0" : "=r" (gsindex));
+
+ rdmsrl(MSR_FS_BASE, fs);
+ rdmsrl(MSR_GS_BASE, gs);
+ rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+
+ asm("movq %%cr0, %0": "=r" (cr0));
+ asm("movq %%cr2, %0": "=r" (cr2));
+ asm("movq %%cr3, %0": "=r" (cr3));
+ asm("movq %%cr4, %0": "=r" (cr4));
+
+ printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
+ fs, fsindex, gs, gsindex, shadowgs);
+ printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
+ printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
+
+ if (!user_fault)
+ kstack_trace( (void *) regs->rbp );
+}
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/task.h>
+#include <arch/prctl.h>
+#include <arch/uaccess.h>
+
+
+long
+do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
+{
+ int ret = 0;
+ int doit = task == current;
+
+ switch (code) {
+ case ARCH_SET_GS:
+ if (addr >= task->arch.addr_limit)
+ return -EPERM;
+
+ task->arch.thread.gsindex = 0;
+ task->arch.thread.gs = addr;
+ if (doit) {
+ /* The kernel's %gs is currently loaded, so this
+ call is needed to set the user version. */
+ load_gs_index(0);
+ ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
+ }
+
+ break;
+ case ARCH_SET_FS:
+ /* Not strictly needed for fs, but do it for symmetry
+ with gs */
+ if (addr >= task->arch.addr_limit)
+ return -EPERM;
+
+ task->arch.thread.fsindex = 0;
+ task->arch.thread.fs = addr;
+ if (doit) {
+ /* The kernel doesn't use %fs so we can set it
+ directly. set the selector to 0 to not confuse
+ __switch_to */
+ asm volatile("movl %0,%%fs" :: "r" (0));
+ ret = checking_wrmsrl(MSR_FS_BASE, addr);
+ }
+
+ break;
+ case ARCH_GET_FS: {
+ unsigned long base;
+ if (doit)
+ rdmsrl(MSR_FS_BASE, base);
+ else
+ base = task->arch.thread.fs;
+ ret = put_user(base, (unsigned long __user *)addr);
+ break;
+ }
+ case ARCH_GET_GS: {
+ unsigned long base;
+ unsigned gsindex;
+ if (doit) {
+ asm("movl %%gs,%0" : "=r" (gsindex));
+ if (gsindex)
+ rdmsrl(MSR_KERNEL_GS_BASE, base);
+ else
+ base = task->arch.thread.gs;
+ }
+ else
+ base = task->arch.thread.gs;
+ ret = put_user(base, (unsigned long __user *)addr);
+ break;
+ }
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+
+long
+sys_arch_prctl(int code, unsigned long addr)
+{
+ return do_arch_prctl(current, code, addr);
+}
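+
+/*
+ * Usage sketch (added commentary; the caller shown is hypothetical): a
+ * user-level runtime would typically reach this handler through the
+ * arch_prctl system call with code ARCH_SET_FS and addr set to its TLS
+ * base, and read the base back with ARCH_GET_FS, where addr points to an
+ * unsigned long in user memory that receives the value.
+ */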
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/linkage.h>
+#include <lwk/cache.h>
+#include <lwk/errno.h>
+#include <arch/asm-offsets.h>
+
+/**
+ * This generates prototypes for all system call handlers.
+ */
+#define __SYSCALL(nr, sym) extern long sym(void);
+#undef _ARCH_X86_64_UNISTD_H
+#include <arch/unistd.h>
+
+/**
+ * Setup for the include of <arch/unistd.h> in the sys_call_table[]
+ * definition below.
+ */
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) [ nr ] = sym,
+#undef _ARCH_X86_64_UNISTD_H
+
+/**
+ * Prototype for system call handler functions.
+ */
+typedef long (*syscall_ptr_t)(void);
+
+/**
+ * Dummy handler for unimplemented system calls.
+ */
+long syscall_not_implemented(void)
+{
+ unsigned long syscall_number;
+
+ /* On entry to function, syscall # is in %rax register */
+ asm volatile("mov %%rax, %0" : "=r"(syscall_number)::"%rax");
+
+ printk(KERN_DEBUG "System call not implemented! "
+ "(syscall_number=%lu)\n", syscall_number);
+// return -ENOSYS;
+ return 0;
+}
+
+/**
+ * This is the system call table. The system_call() function in entry.S
+ * uses this table to determine the handler function to call for each
+ * system call. The table is indexed by system call number.
+ */
+const syscall_ptr_t sys_call_table[__NR_syscall_max+1] = {
+ [0 ... __NR_syscall_max] = syscall_not_implemented,
+ #include <arch/unistd.h>
+};
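+
+/*
+ * For illustration: assuming <arch/unistd.h> defines entries of the form
+ *
+ *   __SYSCALL(__NR_read, sys_read)
+ *
+ * (__NR_read/sys_read are just example names here), the first include above
+ * generates the prototype
+ *
+ *   extern long sys_read(void);
+ *
+ * and the second include, inside the initializer, expands to
+ *
+ *   [ __NR_read ] = sys_read,
+ *
+ * overriding the syscall_not_implemented default for that slot.
+ */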
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/aspace.h>
+#include <lwk/task.h>
+#include <arch/ptrace.h>
+
+int
+arch_task_create(struct task_struct *task,
+ const start_state_t *start_state)
+{
+ struct pt_regs *regs;
+ kaddr_t kstack_top;
+ kaddr_t initial_ksp;
+
+ regs = ((struct pt_regs *)((kaddr_t)task + TASK_SIZE)) - 1;
+ kstack_top = (kaddr_t)(regs + 1);
+ initial_ksp = (kaddr_t)regs;
+
+ task->arch.thread.rsp0 = kstack_top;
+ task->arch.thread.rsp = initial_ksp;
+ task->arch.thread.userrsp = start_state->stack_ptr;
+
+ /* Mark this as a new-task... arch_context_switch() checks this flag */
+ task->arch.flags = TF_NEW_TASK;
+
+ /* Task's address space is from [0, task->addr_limit) */
+ task->arch.addr_limit = PAGE_OFFSET;
+
+ /* Initialize FPU state */
+ task->arch.thread.i387.fxsave.cwd = 0x37f;
+ task->arch.thread.i387.fxsave.mxcsr = 0x1f80;
+
+ /* CPU control unit uses these fields to start the user task running */
+ if (start_state->aspace_id == KERNEL_ASPACE_ID) {
+ regs->ss = __KERNEL_DS;
+ regs->rsp = (vaddr_t)task + TASK_SIZE;
+ regs->eflags = (1 << 9); /* enable interrupts */
+ regs->cs = __KERNEL_CS;
+ } else {
+ regs->ss = __USER_DS;
+ regs->rsp = start_state->stack_ptr;
+ regs->eflags = (1 << 9); /* enable interrupts */
+ regs->cs = __USER_CS;
+ }
+ regs->rip = start_state->entry_point;
+
+ return 0;
+}
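+
+/*
+ * Layout implied by the pointer arithmetic above (a single block of
+ * TASK_SIZE bytes holds both the task_struct and its kernel stack):
+ *
+ *   task ............. struct task_struct (low addresses)
+ *   ...              kernel stack, growing down toward the task_struct
+ *   initial_ksp ...... bottom of the initial struct pt_regs frame
+ *   kstack_top ....... (kaddr_t)task + TASK_SIZE (top of kernel stack)
+ *
+ * The pt_regs frame placed at the very top is what the context-switch /
+ * interrupt-return path consumes to start the task in user (or kernel) mode.
+ */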
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/init.h>
+#include <lwk/spinlock.h>
+#include <lwk/cpuinfo.h>
+#include <lwk/smp.h>
+#include <lwk/time.h>
+#include <arch/io.h>
+#include <arch/apic.h>
+
+#ifdef CONFIG_PC
+/**
+ * Lock that synchronizes access to the Programmable Interval Timer.
+ */
+static DEFINE_SPINLOCK(pit_lock);
+
+/**
+ * This stops the Programmable Interval Timer's periodic system timer
+ * (channel 0). Some systems, BOCHS included, are booted with the PIT system
+ * timer enabled. The LWK doesn't use the PIT, so this function is used during
+ * bootstrap to disable it.
+ */
+static void __init
+pit_stop_timer0(void)
+{
+ unsigned long flags;
+ unsigned PIT_MODE = 0x43;
+ unsigned PIT_CH0 = 0x40;
+
+ spin_lock_irqsave(&pit_lock, flags);
+ outb_p(0x30, PIT_MODE); /* mode 0 */
+ outb_p(0, PIT_CH0); /* LSB system timer interval */
+ outb_p(0, PIT_CH0); /* MSB system timer interval */
+ spin_unlock_irqrestore(&pit_lock, flags);
+}
+
+/**
+ * This uses the Programmable Interval Timer that is standard on all
+ * PC-compatible systems to determine the time stamp counter frequency.
+ *
+ * This uses the speaker output (channel 2) of the PIT. This is better than
+ * using the timer interrupt output because we can read the value of the
+ * speaker with just one inb(), where we need three i/o operations for the
+ * interrupt channel. We count how many ticks the TSC does in 50 ms.
+ *
+ * Returns the detected time stamp counter frequency in KHz.
+ */
+static unsigned int __init
+pit_calibrate_tsc(void)
+{
+ cycles_t start, end;
+ unsigned long flags;
+ unsigned long pit_tick_rate = 1193182UL; /* 1.193182 MHz */
+
+ spin_lock_irqsave(&pit_lock, flags);
+
+ outb((inb(0x61) & ~0x02) | 0x01, 0x61);
+
+ outb(0xb0, 0x43);
+ outb((pit_tick_rate / (1000 / 50)) & 0xff, 0x42);
+ outb((pit_tick_rate / (1000 / 50)) >> 8, 0x42);
+ start = get_cycles_sync();
+ while ((inb(0x61) & 0x20) == 0);
+ end = get_cycles_sync();
+
+ spin_unlock_irqrestore(&pit_lock, flags);
+
+ return (end - start) / 50;
+}
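+
+/*
+ * Worked example of the conversion above (numbers are hypothetical):
+ * a 2.4 GHz TSC advances 2,400,000,000 * 0.050 = 120,000,000 cycles
+ * while the PIT counts down its 50 ms interval, so
+ *
+ *   (end - start) / 50 = 120,000,000 / 50 = 2,400,000 KHz = 2.4 GHz
+ *
+ * which is the value time_init() stores in cpu_info[...].arch.tsc_khz.
+ */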
+#endif
+
+#ifdef CONFIG_CRAY_XT
+/**
+ * The Cray XT platform does not have any real time clocks. Therefore,
+ * we have to inspect various MSRs to determine the CPU frequency and
+ * trust that it is accurate.
+ *
+ * Returns the detected CPU frequency in KHz.
+ *
+ * NOTE: This function should only be used on Cray XT3/XT4/XT? platforms.
+ * While it will work on (some) AMD Opteron K8 and K10 systems, using a
+ * timer based mechanism to detect the actual CPU frequency is preferred.
+ */
+static unsigned int __init
+crayxt_detect_cpu_freq(void)
+{
+ unsigned int MHz = 200;
+ unsigned int lower, upper;
+ int amd_family = cpu_info[this_cpu].arch.x86_family;
+ int amd_model = cpu_info[this_cpu].arch.x86_model;
+
+ if (amd_family == 16) {
+ unsigned int fid; /* current frequency id */
+ unsigned int did; /* current divide id */
+
+ rdmsr(MSR_K10_COFVID_STATUS, lower, upper);
+ fid = lower & 0x3f;
+ did = (lower >> 6) & 0x3f;
+ MHz = 100 * (fid + 0x10) / (1 << did);
+
+ } else if (amd_family == 15) {
+ unsigned int fid; /* current frequency id */
+
+ if (amd_model < 16) {
+ /* Revision C and earlier */
+ rdmsr(MSR_K8_HWCR, lower, upper);
+ fid = (lower >> 24) & 0x3f;
+ } else {
+ /* Revision D and later */
+ rdmsr(MSR_K8_FIDVID_STATUS, lower, upper);
+ fid = lower & 0x3f;
+ }
+
+ switch (fid) {
+ case 0: MHz *= 4; break;
+ case 2: MHz *= 5; break;
+ case 4: MHz *= 6; break;
+ case 6: MHz *= 7; break;
+ case 8: MHz *= 8; break;
+ case 10: MHz *= 9; break;
+ case 12: MHz *= 10; break;
+ case 14: MHz *= 11; break;
+ case 16: MHz *= 12; break;
+ case 18: MHz *= 13; break;
+ case 20: MHz *= 14; break;
+ case 22: MHz *= 15; break;
+ case 24: MHz *= 16; break;
+ case 26: MHz *= 17; break;
+ case 28: MHz *= 18; break;
+ case 30: MHz *= 19; break;
+ case 32: MHz *= 20; break;
+ case 34: MHz *= 21; break;
+ case 36: MHz *= 22; break;
+ case 38: MHz *= 23; break;
+ case 40: MHz *= 24; break;
+ case 42: MHz *= 25; break;
+ }
+ } else {
+ panic("Unknown AMD CPU family (%d).", amd_family);
+ }
+
+ return (MHz * 1000); /* return CPU freq. in KHz */
+}
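+
+/*
+ * Worked examples of the formulas above (register values are hypothetical):
+ *
+ *   Family 16 (K10): fid = 0x06, did = 0
+ *       MHz = 100 * (0x06 + 0x10) / (1 << 0) = 2200   ->  2.2 GHz
+ *
+ *   Family 15 (K8):  fid = 10
+ *       MHz = 200 * 9 = 1800                          ->  1.8 GHz
+ *
+ * The function returns MHz * 1000 so that callers see KHz, matching the
+ * units returned by pit_calibrate_tsc().
+ */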
+#endif
+
+void __init
+time_init(void)
+{
+ unsigned int cpu_khz;
+ unsigned int lapic_khz;
+
+ /*
+ * Detect the CPU frequency
+ */
+#if defined CONFIG_PC
+ cpu_khz = pit_calibrate_tsc();
+ pit_stop_timer0();
+#elif defined CONFIG_CRAY_XT
+ cpu_khz = crayxt_detect_cpu_freq();
+#else
+ #error "In time_init(), unknown system architecture."
+#endif
+
+ cpu_info[this_cpu].arch.cur_cpu_khz = cpu_khz;
+ cpu_info[this_cpu].arch.max_cpu_khz = cpu_khz;
+ cpu_info[this_cpu].arch.min_cpu_khz = cpu_khz;
+ cpu_info[this_cpu].arch.tsc_khz = cpu_khz;
+
+ init_cycles2ns(cpu_khz);
+
+ /*
+ * Detect the Local APIC timer's base clock frequency
+ */
+ if (this_cpu == 0) {
+ lapic_khz = lapic_calibrate_timer();
+ } else {
+ lapic_khz = cpu_info[0].arch.lapic_khz;
+ }
+
+ cpu_info[this_cpu].arch.lapic_khz = lapic_khz;
+
+ printk(KERN_DEBUG "CPU %u: %u.%03u MHz, LAPIC bus %u.%03u MHz\n",
+ this_cpu,
+ cpu_khz / 1000, cpu_khz % 1000,
+ lapic_khz / 1000, lapic_khz % 1000
+ );
+}
+
--- /dev/null
+/*
+ *
+ * Trampoline.S Derived from Setup.S by Linus Torvalds
+ *
+ * 4 Jan 1997 Michael Chastain: changed to gnu as.
+ * 15 Sept 2005 Eric Biederman: 64bit PIC support
+ *
+ * Entry: CS:IP point to the start of our code. We are
+ * in real mode with no stack; the rest of the trampoline
+ * page is free for our use (it is where our stack goes),
+ * but everything else is a mystery.
+ *
+ * A small stack is set up within the trampoline page so
+ * that verify_cpu can be called.
+ *
+ * On entry to trampoline_data, the processor is in real mode
+ * with 16-bit addressing and 16-bit data. CS has some value
+ * and IP is zero. Thus, data addresses need to be absolute
+ * (no relocation) and are taken with regard to r_base.
+ *
+ * With the addition of trampoline_level4_pgt this code can
+ * now enter a 64bit kernel that lives at arbitrary 64bit
+ * physical addresses.
+ *
+ * If you work on this file, check the object module with objdump
+ * --full-contents --reloc to make sure there are no relocation
+ * entries.
+ */
+
+#include <lwk/linkage.h>
+#include <arch/pgtable.h>
+#include <arch/page.h>
+#include <arch/msr.h>
+#include <arch/segment.h>
+
+.data
+
+.code16
+
+ENTRY(trampoline_data)
+r_base = .
+ cli # We should be safe anyway
+ wbinvd
+ mov %cs, %ax # Code and data in the same place
+ mov %ax, %ds
+ mov %ax, %es
+ mov %ax, %ss
+
+
+ movl $0xA5A5A5A5, trampoline_data - r_base
+ # write marker so the master knows we're running
+
+ # Setup stack
+ movw $(trampoline_stack_end - r_base), %sp
+
+ call verify_cpu # Verify the cpu supports long mode
+ testl %eax, %eax # Check for return code
+ jnz no_longmode
+
+ mov %cs, %ax
+ movzx %ax, %esi # Find the 32bit trampoline location
+ shll $4, %esi
+
+ # Fixup the vectors
+ addl %esi, startup_32_vector - r_base
+ addl %esi, startup_64_vector - r_base
+ addl %esi, tgdt + 2 - r_base # Fixup the gdt pointer
+
+ /*
+ * When the kernel is loaded at a non-default location the GDT can lie
+ * beyond 16MB, and lgdt cannot load such an address because the default
+ * operand size in real mode is 16 bits. Use lgdtl instead to force a
+ * 32-bit operand size.
+ */
+
+ lidtl tidt - r_base # load idt with 0, 0
+ lgdtl tgdt - r_base # load gdt with whatever is appropriate
+
+ xor %ax, %ax
+ inc %ax # protected mode (PE) bit
+ lmsw %ax # into protected mode
+
+ # flush prefetch and jump to startup_32
+ ljmpl *(startup_32_vector - r_base)
+
+ .code32
+ .balign 4
+startup_32:
+ movl $__KERNEL_DS, %eax # Initialize the %ds segment register
+ movl %eax, %ds
+
+ xorl %eax, %eax
+ btsl $5, %eax # Enable PAE mode
+ movl %eax, %cr4
+
+ # Setup trampoline 4 level pagetables
+ leal (trampoline_level4_pgt - r_base)(%esi), %eax
+ movl %eax, %cr3
+
+ movl $MSR_EFER, %ecx
+ movl $(1 << _EFER_LME), %eax # Enable Long Mode
+ xorl %edx, %edx
+ wrmsr
+
+ xorl %eax, %eax
+ btsl $31, %eax # Enable paging and in turn activate Long Mode
+ btsl $0, %eax # Enable protected mode
+ movl %eax, %cr0
+
+ /*
+ * At this point we're in long mode but in 32bit compatibility mode
+ * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
+ * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use
+ * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
+ */
+ ljmp *(startup_64_vector - r_base)(%esi)
+
+ .code64
+ .balign 4
+startup_64:
+ # Now jump into the kernel using virtual addresses
+ movq $secondary_startup_64, %rax
+ jmp *%rax
+
+ .code16
+no_longmode:
+ hlt
+ jmp no_longmode
+#include "verify_cpu.S"
+
+ # Careful: these need to be in the same 64K segment as the above
+tidt:
+ .word 0 # idt limit = 0
+ .word 0, 0 # idt base = 0L
+
+ # Duplicate the global descriptor table
+ # so the kernel can live anywhere
+ .balign 4
+tgdt:
+ .short tgdt_end - tgdt # gdt limit
+ .long tgdt - r_base
+ .short 0
+ .quad 0x00cf9b000000ffff # __KERNEL32_CS
+ .quad 0x00af9b000000ffff # __KERNEL_CS
+ .quad 0x00cf93000000ffff # __KERNEL_DS
+tgdt_end:
+
+ .balign 4
+startup_32_vector:
+ .long startup_32 - r_base
+ .word __KERNEL32_CS, 0
+
+ .balign 4
+startup_64_vector:
+ .long startup_64 - r_base
+ .word __KERNEL_CS, 0
+
+trampoline_stack:
+ .org 0x1000
+trampoline_stack_end:
+ENTRY(trampoline_level4_pgt)
+ .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .fill 510,8,0
+ .quad level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
+
+ENTRY(trampoline_end)
--- /dev/null
+/*
+ *
+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
+ * code has been borrowed from boot/setup.S and was introduced by
+ * Andi Kleen.
+ *
+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ *
+ * This is common code for verifying whether the CPU supports long
+ * mode and SSE. It is not called directly; instead, this file is
+ * included at various places and compiled in that context. The
+ * current users are listed below.
+ *
+ * This file is included by both 16bit and 32bit code.
+ *
+ * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
+ * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
+ * arch/x86_64/kernel/trampoline.S: secondary processor verification (16bit)
+ * arch/x86_64/kernel/acpi/wakeup.S: Verification at resume (16bit)
+ *
+ * verify_cpu, returns the status of cpu check in register %eax.
+ * 0: Success 1: Failure
+ *
+ * The caller needs to check for the error code and take the action
+ * appropriately. Either display a message or halt.
+ */
+
+#include <arch/cpufeature.h>
+
+verify_cpu:
+ pushfl # Save caller passed flags
+ pushl $0 # Kill any dangerous flags
+ popfl
+
+ /* minimum CPUID flags for x86-64 as defined by AMD */
+#define M(x) (1<<(x))
+#define M2(a,b) M(a)|M(b)
+#define M4(a,b,c,d) M(a)|M(b)|M(c)|M(d)
+
+#define SSE_MASK \
+ (M2(X86_FEATURE_XMM,X86_FEATURE_XMM2))
+#define REQUIRED_MASK1 \
+ (M4(X86_FEATURE_FPU,X86_FEATURE_PSE,X86_FEATURE_TSC,X86_FEATURE_MSR)|\
+ M4(X86_FEATURE_PAE,X86_FEATURE_CX8,X86_FEATURE_PGE,X86_FEATURE_CMOV)|\
+ M(X86_FEATURE_FXSR))
+#define REQUIRED_MASK2 \
+ (M(X86_FEATURE_LM - 32))
+
+ pushfl # standard way to check for cpuid
+ popl %eax
+ movl %eax,%ebx
+ xorl $0x200000,%eax
+ pushl %eax
+ popfl
+ pushfl
+ popl %eax
+ cmpl %eax,%ebx
+ jz verify_cpu_no_longmode # cpu has no cpuid
+
+ movl $0x0,%eax # See if cpuid 1 is implemented
+ cpuid
+ cmpl $0x1,%eax
+ jb verify_cpu_no_longmode # no cpuid 1
+
+ xor %di,%di
+ cmpl $0x68747541,%ebx # AuthenticAMD
+ jnz verify_cpu_noamd
+ cmpl $0x69746e65,%edx
+ jnz verify_cpu_noamd
+ cmpl $0x444d4163,%ecx
+ jnz verify_cpu_noamd
+ mov $1,%di # cpu is from AMD
+
+verify_cpu_noamd:
+ movl $0x1,%eax # Does the cpu have what it takes
+ cpuid
+ andl $REQUIRED_MASK1,%edx
+ xorl $REQUIRED_MASK1,%edx
+ jnz verify_cpu_no_longmode
+
+ movl $0x80000000,%eax # See if extended cpuid is implemented
+ cpuid
+ cmpl $0x80000001,%eax
+ jb verify_cpu_no_longmode # no extended cpuid
+
+ movl $0x80000001,%eax # Does the cpu have what it takes
+ cpuid
+ andl $REQUIRED_MASK2,%edx
+ xorl $REQUIRED_MASK2,%edx
+ jnz verify_cpu_no_longmode
+
+verify_cpu_sse_test:
+ movl $1,%eax
+ cpuid
+ andl $SSE_MASK,%edx
+ cmpl $SSE_MASK,%edx
+ je verify_cpu_sse_ok
+ test %di,%di
+ jz verify_cpu_no_longmode # only try to force SSE on AMD
+ movl $0xc0010015,%ecx # HWCR
+ rdmsr
+ btr $15,%eax # enable SSE
+ wrmsr
+ xor %di,%di # don't loop
+ jmp verify_cpu_sse_test # try again
+
+verify_cpu_no_longmode:
+ popfl # Restore caller passed flags
+ movl $1,%eax
+ ret
+verify_cpu_sse_ok:
+ popfl # Restore caller passed flags
+ xorl %eax, %eax
+ ret
--- /dev/null
+/* ld script to make x86-64 LWK kernel
+ * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
+ */
+
+#define LOAD_OFFSET __START_KERNEL_map
+
+#include <arch-generic/vmlwk.lds.h>
+#include <arch/page.h>
+
+#undef i386 /* in case the preprocessor is a 32bit one */
+
+OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
+OUTPUT_ARCH(i386:x86-64)
+ENTRY(phys_startup_64)
+_proxy_pda = 1;
+PHDRS {
+ text PT_LOAD FLAGS(5); /* R_E */
+ data PT_LOAD FLAGS(7); /* RWE */
+ user PT_LOAD FLAGS(7); /* RWE */
+ data.init PT_LOAD FLAGS(7); /* RWE */
+ note PT_NOTE FLAGS(4); /* R__ */
+}
+SECTIONS
+{
+ . = __START_KERNEL;
+ phys_startup_64 = startup_64 - LOAD_OFFSET;
+ _text = .; /* Text and read-only data */
+ .text : AT(ADDR(.text) - LOAD_OFFSET) {
+ /* First the code that has to be first for bootstrapping */
+ *(.bootstrap.text)
+ _stext = .;
+ /* Then the rest */
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ } :text = 0x9090
+ /* out-of-line lock text */
+ .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) }
+
+ _etext = .; /* End of text section */
+
+ . = ALIGN(16); /* Exception table */
+ __start___ex_table = .;
+ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
+ __stop___ex_table = .;
+
+ BUG_TABLE
+
+ RODATA
+
+ . = ALIGN(PAGE_SIZE); /* Align data segment to page size boundary */
+ /* Data */
+ .data : AT(ADDR(.data) - LOAD_OFFSET) {
+ DATA_DATA
+ CONSTRUCTORS
+ } :data
+
+ _edata = .; /* End of data section */
+
+ . = ALIGN(PAGE_SIZE);
+ . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
+ .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
+ *(.data.cacheline_aligned)
+ }
+ . = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
+ .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
+ *(.data.read_mostly)
+ }
+
+#define VSYSCALL_ADDR (-10*1024*1024)
+#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))
+#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))
+
+#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
+#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)
+
+#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
+#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
+
+ . = VSYSCALL_ADDR;
+ .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) } :user
+ __vsyscall_0 = VSYSCALL_VIRT_ADDR;
+
+ .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1))
+ { *(.vsyscall_1) }
+
+ .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2))
+ { *(.vsyscall_2) }
+
+ .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3))
+ { *(.vsyscall_3) }
+
+ . = VSYSCALL_VIRT_ADDR + PAGE_SIZE;
+
+#undef VSYSCALL_ADDR
+#undef VSYSCALL_PHYS_ADDR
+#undef VSYSCALL_VIRT_ADDR
+#undef VLOAD_OFFSET
+#undef VLOAD
+#undef VVIRT_OFFSET
+#undef VVIRT
+
+ . = ALIGN(8192); /* bootstrap_task */
+ .data.bootstrap_task : AT(ADDR(.data.bootstrap_task) - LOAD_OFFSET) {
+ *(.data.bootstrap_task)
+ }:data.init
+
+ . = ALIGN(4096);
+ .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
+ *(.data.page_aligned)
+ }
+
+ /* might get freed after init */
+ . = ALIGN(4096);
+ __smp_alt_begin = .;
+ __smp_alt_instructions = .;
+ .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
+ *(.smp_altinstructions)
+ }
+ __smp_alt_instructions_end = .;
+ . = ALIGN(8);
+ __smp_locks = .;
+ .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+ *(.smp_locks)
+ }
+ __smp_locks_end = .;
+ .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
+ *(.smp_altinstr_replacement)
+ }
+ . = ALIGN(4096);
+ __smp_alt_end = .;
+
+ . = ALIGN(4096); /* Init code and data */
+ __init_begin = .;
+ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
+ _sinittext = .;
+ *(.init.text)
+ _einittext = .;
+ }
+ __initdata_begin = .;
+ .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) }
+ __initdata_end = .;
+ . = ALIGN(16);
+ __setup_start = .;
+ .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) }
+ __setup_end = .;
+ __initcall_start = .;
+ .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
+ INITCALLS
+ }
+ __initcall_end = .;
+ __con_initcall_start = .;
+ .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
+ *(.con_initcall.init)
+ }
+ __con_initcall_end = .;
+ SECURITY_INIT
+ . = ALIGN(8);
+ __alt_instructions = .;
+ .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
+ *(.altinstructions)
+ }
+ __alt_instructions_end = .;
+ .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
+ *(.altinstr_replacement)
+ }
+ /* .exit.text is discarded at run time, not link time, to deal with
+ references from .altinstructions and .eh_frame */
+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
+ .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ . = ALIGN(4096);
+ __initramfs_start = .;
+ .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
+ __initramfs_end = .;
+#endif
+
+ . = ALIGN(4096);
+ __per_cpu_start = .;
+ .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
+ __per_cpu_end = .;
+ . = ALIGN(4096);
+ __init_end = .;
+
+ . = ALIGN(4096);
+ __nosave_begin = .;
+ .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) }
+ . = ALIGN(4096);
+ __nosave_end = .;
+
+ __bss_start = .; /* BSS */
+ .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
+ *(.bss.page_aligned)
+ *(.bss)
+ }
+ __bss_stop = .;
+
+ _end = . ;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.exitcall.exit)
+ *(.eh_frame)
+ }
+
+ STABS_DEBUG
+
+ DWARF_DEBUG
+}
--- /dev/null
+/*
+ * Derived from Linux 2.6.25 linux-2.6.25/arch/x86/kernel/vsyscall.c
+ * Original header:
+ * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
+ * Copyright 2003 Andi Kleen, SuSE Labs.
+ *
+ * Thanks to hpa@transmeta.com for some useful hint.
+ * Special thanks to Ingo Molnar for his early experience with
+ * a different vsyscall implementation for Linux/IA32 and for the name.
+ *
+ * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
+ * at virtual address -10Mbyte+1024bytes etc... There are at max 4
+ * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
+ * jumping out of line if necessary. We cannot add more with this
+ * mechanism because older kernels won't return -ENOSYS.
+ * If we want more than four we need a vDSO.
+ *
+ * Note: the concept clashes with user mode linux. If you use UML and
+ * want per guest time just set the kernel.vsyscall64 sysctl to 0.
+ */
+
+#include <lwk/kernel.h>
+#include <lwk/init.h>
+#include <lwk/time.h>
+#include <lwk/unistd.h>
+#include <arch/vsyscall.h>
+#include <arch/pgtable.h>
+#include <arch/fixmap.h>
+
+#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
+#define __syscall_clobber "r11","cx","memory"
+
+int __vsyscall(0)
+vgettimeofday(struct timeval *tv, struct timezone *tz)
+{
+ int ret;
+ asm volatile("syscall"
+ : "=a" (ret)
+ : "0" (__NR_gettimeofday),"D" (tv),"S" (tz)
+ : __syscall_clobber );
+ return ret;
+}
+
+time_t __vsyscall(1)
+vtime(time_t *t)
+{
+ time_t ret;
+ asm volatile("syscall"
+ : "=a" (ret)
+ : "0" (__NR_time),"D" (t)
+ : __syscall_clobber );
+ return ret;
+}
+
+void __init
+vsyscall_map(void)
+{
+ extern char __vsyscall_0;
+ unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
+
+ /* Setup the virtual syscall fixmap entry */
+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
+
+ BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
+
+ BUG_ON((unsigned long) &vgettimeofday !=
+ VSYSCALL_ADDR(__NR_vgettimeofday));
+ BUG_ON((unsigned long) &vtime !=
+ VSYSCALL_ADDR(__NR_vtime));
+}
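+
+/*
+ * Illustrative sketch of how user code reaches these entries (the fixed
+ * address follows the -10 MB vsyscall convention used above; the wrapper
+ * function is hypothetical and not provided by this file):
+ *
+ *   typedef int (*vgtod_t)(struct timeval *, struct timezone *);
+ *
+ *   static int example_gettimeofday(struct timeval *tv)
+ *   {
+ *           // slot 0 (vgettimeofday) lives at the fixed vsyscall address
+ *           vgtod_t vgtod = (vgtod_t)0xffffffffff600000UL;
+ *           return vgtod(tv, NULL);
+ *   }
+ */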
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/spinlock.h>
+#include <lwk/xcall.h>
+#include <arch/apic.h>
+#include <arch/idt_vectors.h>
+#include <arch/processor.h>
+
+/**
+ * Used to pass data to and synchronize the CPUs targeted by a cross-call.
+ */
+struct xcall_data_struct {
+ void (*func)(void *info);
+ void * info;
+ atomic_t started;
+ atomic_t finished;
+ bool wait;
+};
+
+/**
+ * Global cross-call data pointer, protected by xcall_data_lock.
+ */
+static struct xcall_data_struct *xcall_data;
+static DEFINE_SPINLOCK(xcall_data_lock);
+
+/**
+ * x86_64 specific code for carrying out inter-CPU function calls.
+ * This function should not be called directly. Call xcall_function() instead.
+ *
+ * Arguments:
+ * [IN] cpu_mask: The target CPUs of the cross-call.
+ * [IN] func: The function to execute on each target CPU.
+ * [IN] info: Argument to pass to func().
+ * [IN] wait: true = wait for cross-call to fully complete.
+ *
+ * Returns:
+ * Success: 0
+ * Failure: Error code
+ */
+int
+arch_xcall_function(
+ cpumask_t cpu_mask,
+ void (*func)(void *info),
+ void * info,
+ bool wait
+)
+{
+ struct xcall_data_struct data;
+ unsigned int num_cpus;
+ unsigned int cpu;
+
+ BUG_ON(irqs_disabled());
+
+ /* Count how many CPUs are being targeted */
+ num_cpus = cpus_weight(cpu_mask);
+ if (!num_cpus)
+ return 0;
+
+ /* Fill in the xcall data structure on our stack */
+ data.func = func;
+ data.info = info;
+ atomic_set(&data.started, 0);
+ if (wait)
+ atomic_set(&data.finished, 0);
+ data.wait = wait;
+
+ /* Spin with IRQs enabled */
+ while (!spin_trylock_irq(&xcall_data_lock))
+ ;
+ /* IRQs are now disabled */
+
+ /* Set the global xcall data pointer */
+ xcall_data = &data;
+ wmb();
+
+ /* Send inter-processor interrupts to the target CPUs */
+ for_each_cpu_mask(cpu, cpu_mask)
+ lapic_send_ipi(cpu, XCALL_FUNCTION_VECTOR);
+
+ /* Wait for initiation responses */
+ while (atomic_read(&data.started) != num_cpus)
+ cpu_relax();
+
+ /* If requested, wait for completion responses */
+ if (wait) {
+ while (atomic_read(&data.finished) != num_cpus)
+ cpu_relax();
+ }
+ spin_unlock_irq(&xcall_data_lock);
+
+ return 0;
+}
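+
+/*
+ * Usage sketch (illustrative only): the portable entry point is
+ * xcall_function(), assumed here to take the same arguments as
+ * arch_xcall_function() above. drain_remote_queue() is a made-up callback,
+ * and the cpumask helpers are the usual Linux-style ones.
+ *
+ *   static void drain_remote_queue(void *info)
+ *   {
+ *           // runs on every targeted CPU, in interrupt context
+ *   }
+ *
+ *   void example_broadcast(void)
+ *   {
+ *           cpumask_t mask = cpu_online_map;
+ *           cpu_clear(this_cpu, mask);    // don't IPI ourselves
+ *           xcall_function(mask, drain_remote_queue, NULL, true);
+ *   }
+ */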
+
+/**
+ * The interrupt handler for inter-CPU function calls.
+ */
+void
+arch_xcall_function_interrupt(struct pt_regs *regs, unsigned int vector)
+{
+ void (*func)(void *info) = xcall_data->func;
+ void *info = xcall_data->info;
+ int wait = xcall_data->wait;
+
+ /* Notify initiating CPU that we've started */
+ mb();
+ atomic_inc(&xcall_data->started);
+
+ /* Execute the cross-call function */
+ (*func)(info);
+
+ /* Notify initiating CPU that the cross-call function has completed */
+ if (wait) {
+ mb();
+ atomic_inc(&xcall_data->finished);
+ }
+}
+
+/**
+ * Sends a reschedule inter-processor interrupt to the target CPU.
+ * This causes the target CPU to call schedule().
+ */
+void
+arch_xcall_reschedule(id_t cpu)
+{
+ lapic_send_ipi(cpu, XCALL_RESCHEDULE_VECTOR);
+}
+
+/**
+ * The interrupt handler for inter-CPU reschedule calls.
+ */
+void
+arch_xcall_reschedule_interrupt(struct pt_regs *regs, unsigned int vector)
+{
+ /*
+ * Nothing to do, schedule() will be automatically
+ * called before returning to user-space
+ */
+}
--- /dev/null
+#
+# Makefile for x86_64-specific library files.
+#
+lib-y += memmove.o memset.o memcpy.o thunk.o delay.o bitops.o extable.o copy_user.o usercopy.o getuser.o putuser.o
--- /dev/null
+#include <lwk/bitops.h>
+
+#undef find_first_zero_bit
+#undef find_next_zero_bit
+#undef find_first_bit
+#undef find_next_bit
+
+static inline long
+__find_first_zero_bit(const unsigned long * addr, unsigned long size)
+{
+ long d0, d1, d2;
+ long res;
+
+ /*
+ * We must test the size in words, not in bits, because
+ * otherwise incoming sizes in the range -63..-1 will not run
+ * any scasq instructions, and then the flags used by the je
+ * instruction will have whatever random value was in place
+ * before. Nobody should call us like that, but
+ * find_next_zero_bit() does when offset and size are at the
+ * same word and it fails to find a zero itself.
+ */
+ size += 63;
+ size >>= 6;
+ if (!size)
+ return 0;
+ asm volatile(
+ " repe; scasq\n"
+ " je 1f\n"
+ " xorq -8(%%rdi),%%rax\n"
+ " subq $8,%%rdi\n"
+ " bsfq %%rax,%%rdx\n"
+ "1: subq %[addr],%%rdi\n"
+ " shlq $3,%%rdi\n"
+ " addq %%rdi,%%rdx"
+ :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
+ :"0" (0ULL), "1" (size), "2" (addr), "3" (-1ULL),
+ [addr] "S" (addr) : "memory");
+ /*
+ * Any register would do for [addr] above, but GCC tends to
+ * prefer rbx over rsi, even though rsi is readily available
+ * and doesn't have to be saved.
+ */
+ return res;
+}
+
+/**
+ * find_first_zero_bit - find the first zero bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit-number of the first zero bit, not the number of the byte
+ * containing a bit.
+ */
+long find_first_zero_bit(const unsigned long * addr, unsigned long size)
+{
+ return __find_first_zero_bit (addr, size);
+}
+
+/**
+ * find_next_zero_bit - find the next zero bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+long find_next_zero_bit (const unsigned long * addr, long size, long offset)
+{
+ const unsigned long * p = addr + (offset >> 6);
+ unsigned long set = 0;
+ unsigned long res, bit = offset&63;
+
+ if (bit) {
+ /*
+ * Look for zero in first word
+ */
+ asm("bsfq %1,%0\n\t"
+ "cmoveq %2,%0"
+ : "=r" (set)
+ : "r" (~(*p >> bit)), "r"(64L));
+ if (set < (64 - bit))
+ return set + offset;
+ set = 64 - bit;
+ p++;
+ }
+ /*
+ * No zero yet, search remaining full words for a zero
+ */
+ res = __find_first_zero_bit (p, size - 64 * (p - addr));
+
+ return (offset + set + res);
+}
+
+static inline long
+__find_first_bit(const unsigned long * addr, unsigned long size)
+{
+ long d0, d1;
+ long res;
+
+ /*
+ * We must test the size in words, not in bits, because
+ * otherwise incoming sizes in the range -63..-1 will not run
+ * any scasq instructions, and then the flags used by the jz
+ * instruction will have whatever random value was in place
+ * before. Nobody should call us like that, but
+ * find_next_bit() does when offset and size are at the same
+ * word and it fails to find a one itself.
+ */
+ size += 63;
+ size >>= 6;
+ if (!size)
+ return 0;
+ asm volatile(
+ " repe; scasq\n"
+ " jz 1f\n"
+ " subq $8,%%rdi\n"
+ " bsfq (%%rdi),%%rax\n"
+ "1: subq %[addr],%%rdi\n"
+ " shlq $3,%%rdi\n"
+ " addq %%rdi,%%rax"
+ :"=a" (res), "=&c" (d0), "=&D" (d1)
+ :"0" (0ULL), "1" (size), "2" (addr),
+ [addr] "r" (addr) : "memory");
+ return res;
+}
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit-number of the first set bit, not the number of the byte
+ * containing a bit.
+ */
+long find_first_bit(const unsigned long * addr, unsigned long size)
+{
+ return __find_first_bit(addr,size);
+}
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+long find_next_bit(const unsigned long * addr, long size, long offset)
+{
+ const unsigned long * p = addr + (offset >> 6);
+ unsigned long set = 0, bit = offset & 63, res;
+
+ if (bit) {
+ /*
+ * Look for nonzero in the first 64 bits:
+ */
+ asm("bsfq %1,%0\n\t"
+ "cmoveq %2,%0\n\t"
+ : "=r" (set)
+ : "r" (*p >> bit), "r" (64L));
+ if (set < (64 - bit))
+ return set + offset;
+ set = 64 - bit;
+ p++;
+ }
+ /*
+ * No set bit yet, search remaining full words for a bit
+ */
+ res = __find_first_bit (p, size - 64 * (p - addr));
+ return (offset + set + res);
+}
+
+#include <lwk/linux_compat.h>
+
+EXPORT_SYMBOL(find_next_bit);
+EXPORT_SYMBOL(find_first_bit);
+EXPORT_SYMBOL(find_first_zero_bit);
+EXPORT_SYMBOL(find_next_zero_bit);
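+
+/*
+ * Usage sketch (illustrative only): scanning a small allocation bitmap.
+ * The bitmap name and size below are arbitrary example values, and
+ * set_bit() is assumed to be the usual <lwk/bitops.h> helper.
+ *
+ *   #define EXAMPLE_NBITS 128
+ *   static unsigned long example_map[EXAMPLE_NBITS / 64];
+ *
+ *   // Returns the index of a free slot, or >= EXAMPLE_NBITS if none.
+ *   static long example_alloc_slot(void)
+ *   {
+ *           long bit = find_first_zero_bit(example_map, EXAMPLE_NBITS);
+ *           if (bit < EXAMPLE_NBITS)
+ *                   set_bit(bit, example_map);
+ *           return bit;
+ *   }
+ */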
--- /dev/null
+/* Copyright 2002 Andi Kleen, SuSE Labs.
+ * Subject to the GNU Public License v2.
+ *
+ * Functions to copy from and to user space.
+ */
+
+#include <lwk/linkage.h>
+#include <arch/dwarf2.h>
+
+#include <arch/current.h>
+#include <arch/asm-offsets.h>
+
+/* Standard copy_to_user with segment limit checking */
+ENTRY(copy_to_user)
+ CFI_STARTPROC
+ GET_CURRENT(%rax)
+ movq %rdi,%rcx
+ addq %rdx,%rcx
+ jc bad_to_user
+ cmpq tsk_arch_addr_limit(%rax),%rcx
+ jae bad_to_user
+ xorl %eax,%eax /* clear zero flag */
+ jmp copy_user_generic_string
+ CFI_ENDPROC
+
+ENTRY(copy_user_generic)
+ CFI_STARTPROC
+ movl $1,%ecx /* set zero flag */
+ jmp copy_user_generic_string
+ CFI_ENDPROC
+
+ENTRY(__copy_from_user_inatomic)
+ CFI_STARTPROC
+ xorl %ecx,%ecx /* clear zero flag */
+ jmp copy_user_generic_string
+ CFI_ENDPROC
+
+/* Standard copy_from_user with segment limit checking */
+ENTRY(copy_from_user)
+ CFI_STARTPROC
+ GET_CURRENT(%rax)
+ movq %rsi,%rcx
+ addq %rdx,%rcx
+ jc bad_from_user
+ cmpq tsk_arch_addr_limit(%rax),%rcx
+ jae bad_from_user
+ movl $1,%ecx /* set zero flag */
+ jmp copy_user_generic_string
+ CFI_ENDPROC
+ENDPROC(copy_from_user)
+
+ .section .fixup,"ax"
+ /* must zero dest */
+bad_from_user:
+ CFI_STARTPROC
+ movl %edx,%ecx
+ xorl %eax,%eax
+ rep
+ stosb
+bad_to_user:
+ movl %edx,%eax
+ ret
+ CFI_ENDPROC
+END(bad_from_user)
+ .previous
+
+
+ /* rdi destination
+ * rsi source
+ * rdx count
+ * ecx zero flag
+ *
+ * Output:
+ * eax uncopied bytes or 0 if successful.
+ *
+ * Only 4GB of copy is supported. This shouldn't be a problem
+ * because the kernel normally only writes from/to page sized chunks
+ * even if user space passed a longer buffer.
+ * Copying more would also be dangerous because both Intel and AMD have
+ * errata with rep movsq > 4GB. Anyone who feels the need to fix this
+ * should take those errata into account.
+ */
+ENTRY(copy_user_generic_string)
+ CFI_STARTPROC
+ movl %ecx,%r8d /* save zero flag */
+ movl %edx,%ecx
+ shrl $3,%ecx
+ andl $7,%edx
+ jz 10f
+1: rep
+ movsq
+ movl %edx,%ecx
+2: rep
+ movsb
+9: movl %ecx,%eax
+ ret
+
+ /* multiple of 8 byte */
+10: rep
+ movsq
+ xor %eax,%eax
+ ret
+
+ /* exception handling */
+3: lea (%rdx,%rcx,8),%rax /* exception on quad loop */
+ jmp 6f
+5: movl %ecx,%eax /* exception on byte loop */
+ /* eax: left over bytes */
+6: testl %r8d,%r8d /* zero flag set? */
+ jz 7f
+ movl %eax,%ecx /* initialize x86 loop counter */
+ push %rax
+ xorl %eax,%eax
+8: rep
+ stosb /* zero the rest */
+11: pop %rax
+7: ret
+ CFI_ENDPROC
+END(copy_user_generic_string)
+
+ .section __ex_table,"a"
+ .quad 1b,3b
+ .quad 2b,5b
+ .quad 8b,11b
+ .quad 10b,3b
+ .previous
--- /dev/null
+/*
+ * Precise Delay Loops for x86-64
+ *
+ * Copyright (C) 1993 Linus Torvalds
+ * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ *
+ * The __delay function must _NOT_ be inlined as its execution time
+ * depends wildly on alignment on many x86 processors.
+ */
+
+#include <lwk/smp.h>
+#include <lwk/cpuinfo.h>
+#include <lwk/delay.h>
+#include <arch/msr.h>
+#include <arch/processor.h>
+#include <arch/smp.h>
+
+void __delay(unsigned long cycles)
+{
+ unsigned bclock, now;
+
+ rdtscl(bclock);
+ do
+ {
+ rep_nop();
+ rdtscl(now);
+ }
+ while((now-bclock) < cycles);
+}
+
+inline void __const_udelay(unsigned long xsecs)
+{
+ __delay(
+ (xsecs * cpu_info[this_cpu].arch.tsc_khz * 1000) /* cycles * 2**32 */
+ >> 32 /* div by 2**32 */
+ );
+}
+
+void __udelay(unsigned long usecs)
+{
+ __const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */
+}
+
+void __ndelay(unsigned long nsecs)
+{
+ __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
+}
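+
+/*
+ * The constants above are 32.32 fixed-point scale factors:
+ * 0x000010c6 ~= 2^32 / 10^6 and 0x00005 ~= 2^32 / 10^9 (rounded up).
+ * Worked example with a hypothetical 2,000,000 KHz (2 GHz) TSC:
+ *
+ *   __udelay(100)
+ *     -> __const_udelay(100 * 0x10c6)              = 429,400
+ *     -> __delay((429,400 * 2,000,000,000) >> 32) ~= 199,954 cycles
+ *     ~= 100 us at 2 GHz (the small error comes from rounding 2^32/10^6).
+ */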
+
--- /dev/null
+/*
+ * lwk/arch/x86_64/lib/extable.c
+ */
+
+#include <lwk/stddef.h>
+#include <lwk/extable.h>
+
+/* Simple binary search */
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *first,
+ const struct exception_table_entry *last,
+ unsigned long value)
+{
+ /* Work around a B stepping K8 bug */
+ if ((value >> 32) == 0)
+ value |= 0xffffffffUL << 32;
+
+ while (first <= last) {
+ const struct exception_table_entry *mid;
+ long diff;
+
+ mid = (last - first) / 2 + first;
+ diff = mid->insn - value;
+ if (diff == 0)
+ return mid;
+ else if (diff < 0)
+ first = mid+1;
+ else
+ last = mid-1;
+ }
+ return NULL;
+}
--- /dev/null
+/*
+ * __get_user functions.
+ *
+ * (C) Copyright 1998 Linus Torvalds
+ * (C) Copyright 2005 Andi Kleen
+ *
+ * These functions have a non-standard call interface
+ * to make them more efficient, especially as they
+ * return an error value in addition to the "real"
+ * return value.
+ */
+
+/*
+ * __get_user_X
+ *
+ * Inputs: %rcx contains the address.
+ * The register is modified, but all changes are undone
+ * before returning because the C code doesn't know about it.
+ *
+ * Outputs: %rax is error code (0 or -EFAULT)
+ * %rdx contains zero-extended value
+ *
+ * %r8 is destroyed.
+ *
+ * These functions should not modify any other registers,
+ * as they get called from within inline assembly.
+ */
+
+#include <lwk/linkage.h>
+#include <arch/dwarf2.h>
+#include <arch/page.h>
+#include <arch/errno.h>
+#include <arch/asm-offsets.h>
+#include <arch/current.h>
+
+ .text
+ENTRY(__get_user_1)
+ CFI_STARTPROC
+ GET_CURRENT(%r8)
+ cmpq tsk_arch_addr_limit(%r8),%rcx
+ jae bad_get_user
+1: movzb (%rcx),%edx
+ xorl %eax,%eax
+ ret
+ CFI_ENDPROC
+ENDPROC(__get_user_1)
+
+ENTRY(__get_user_2)
+ CFI_STARTPROC
+ GET_CURRENT(%r8)
+ addq $1,%rcx
+ jc 20f
+ cmpq tsk_arch_addr_limit(%r8),%rcx
+ jae 20f
+ decq %rcx
+2: movzwl (%rcx),%edx
+ xorl %eax,%eax
+ ret
+20: decq %rcx
+ jmp bad_get_user
+ CFI_ENDPROC
+ENDPROC(__get_user_2)
+
+ENTRY(__get_user_4)
+ CFI_STARTPROC
+ GET_CURRENT(%r8)
+ addq $3,%rcx
+ jc 30f
+ cmpq tsk_arch_addr_limit(%r8),%rcx
+ jae 30f
+ subq $3,%rcx
+3: movl (%rcx),%edx
+ xorl %eax,%eax
+ ret
+30: subq $3,%rcx
+ jmp bad_get_user
+ CFI_ENDPROC
+ENDPROC(__get_user_4)
+
+ENTRY(__get_user_8)
+ CFI_STARTPROC
+ GET_CURRENT(%r8)
+ addq $7,%rcx
+ jc 40f
+ cmpq tsk_arch_addr_limit(%r8),%rcx
+ jae 40f
+ subq $7,%rcx
+4: movq (%rcx),%rdx
+ xorl %eax,%eax
+ ret
+40: subq $7,%rcx
+ jmp bad_get_user
+ CFI_ENDPROC
+ENDPROC(__get_user_8)
+
+bad_get_user:
+ CFI_STARTPROC
+ xorl %edx,%edx
+ movq $(-EFAULT),%rax
+ ret
+ CFI_ENDPROC
+END(bad_get_user)
+
+.section __ex_table,"a"
+ .quad 1b,bad_get_user
+ .quad 2b,bad_get_user
+ .quad 3b,bad_get_user
+ .quad 4b,bad_get_user
+.previous
--- /dev/null
+/* Copyright 2002 Andi Kleen */
+
+ #include <arch/cpufeature.h>
+/*
+ * memcpy - Copy a memory block.
+ *
+ * Input:
+ * rdi destination
+ * rsi source
+ * rdx count
+ *
+ * Output:
+ * rax original destination
+ */
+
+ .globl __memcpy
+ .globl memcpy
+ .p2align 4
+__memcpy:
+memcpy:
+ pushq %rbx
+ movq %rdi,%rax
+
+ movl %edx,%ecx
+ shrl $6,%ecx
+ jz .Lhandle_tail
+
+ .p2align 4
+.Lloop_64:
+ decl %ecx
+
+ movq (%rsi),%r11
+ movq 8(%rsi),%r8
+
+ movq %r11,(%rdi)
+ movq %r8,1*8(%rdi)
+
+ movq 2*8(%rsi),%r9
+ movq 3*8(%rsi),%r10
+
+ movq %r9,2*8(%rdi)
+ movq %r10,3*8(%rdi)
+
+ movq 4*8(%rsi),%r11
+ movq 5*8(%rsi),%r8
+
+ movq %r11,4*8(%rdi)
+ movq %r8,5*8(%rdi)
+
+ movq 6*8(%rsi),%r9
+ movq 7*8(%rsi),%r10
+
+ movq %r9,6*8(%rdi)
+ movq %r10,7*8(%rdi)
+
+ leaq 64(%rsi),%rsi
+ leaq 64(%rdi),%rdi
+ jnz .Lloop_64
+
+.Lhandle_tail:
+ movl %edx,%ecx
+ andl $63,%ecx
+ shrl $3,%ecx
+ jz .Lhandle_7
+ .p2align 4
+.Lloop_8:
+ decl %ecx
+ movq (%rsi),%r8
+ movq %r8,(%rdi)
+ leaq 8(%rdi),%rdi
+ leaq 8(%rsi),%rsi
+ jnz .Lloop_8
+
+.Lhandle_7:
+ movl %edx,%ecx
+ andl $7,%ecx
+ jz .Lende
+ .p2align 4
+.Lloop_1:
+ movb (%rsi),%r8b
+ movb %r8b,(%rdi)
+ incq %rdi
+ incq %rsi
+ decl %ecx
+ jnz .Lloop_1
+
+.Lende:
+ popq %rbx
+ ret
+.Lfinal:
+
+ /* Some CPUs run faster using the string copy instructions.
+ It is also a lot simpler. Use this when possible */
+
+ .section .altinstructions,"a"
+ .align 8
+ .quad memcpy
+ .quad memcpy_c
+ .byte X86_FEATURE_REP_GOOD
+ .byte .Lfinal-memcpy
+ .byte memcpy_c_end-memcpy_c
+ .previous
+
+ .section .altinstr_replacement,"ax"
+ /* rdi destination
+ * rsi source
+ * rdx count
+ */
+memcpy_c:
+ movq %rdi,%rax
+ movl %edx,%ecx
+ shrl $3,%ecx
+ andl $7,%edx
+ rep
+ movsq
+ movl %edx,%ecx
+ rep
+ movsb
+ ret
+memcpy_c_end:
+ .previous
--- /dev/null
+/* Normally compiler builtins are used, but sometimes the compiler calls out
+ of line code. Based on asm-i386/string.h.
+ */
+#define _STRING_C
+#include <lwk/string.h>
+
+#undef memmove
+void *memmove(void * dest, const void *src, size_t count)
+{
+ if (dest < src) {
+ __inline_memcpy(dest,src,count);
+ } else {
+ char *p = (char *) dest + count;
+ char *s = (char *) src + count;
+ while (count--)
+ *--p = *--s;
+ }
+ return dest;
+}
--- /dev/null
+/* Normally compiler builtins are used, but sometimes the compiler calls out
+ of line code. Based on asm-i386/string.h.
+ */
+#define _STRING_C
+#include <lwk/string.h>
+
+#undef memset
+void *memset(void *dest, int c, size_t count)
+{
+ char *p = (char *) dest + count;
+ while (count--)
+ *--p = c;
+ return dest;
+}
--- /dev/null
+/*
+ * __put_user functions.
+ *
+ * (C) Copyright 1998 Linus Torvalds
+ * (C) Copyright 2005 Andi Kleen
+ *
+ * These functions have a non-standard call interface
+ * to make them more efficient, especially as they
+ * return an error value in addition to the "real"
+ * return value.
+ */
+
+/*
+ * __put_user_X
+ *
+ * Inputs: %rcx contains the address
+ * %rdx contains new value
+ *
+ * Outputs: %rax is error code (0 or -EFAULT)
+ *
+ * %r8 is destroyed.
+ *
+ * These functions should not modify any other registers,
+ * as they get called from within inline assembly.
+ */
+
+#include <lwk/linkage.h>
+#include <arch/dwarf2.h>
+#include <arch/page.h>
+#include <arch/errno.h>
+#include <arch/asm-offsets.h>
+#include <arch/current.h>
+
+ .text
+ENTRY(__put_user_1)
+ CFI_STARTPROC
+ GET_CURRENT(%r8)
+ cmpq tsk_arch_addr_limit(%r8),%rcx
+ jae bad_put_user
+1: movb %dl,(%rcx)
+ xorl %eax,%eax
+ ret
+ CFI_ENDPROC
+ENDPROC(__put_user_1)
+
+ENTRY(__put_user_2)
+ CFI_STARTPROC
+ GET_CURRENT(%r8)
+ addq $1,%rcx
+ jc 20f
+ cmpq tsk_arch_addr_limit(%r8),%rcx
+ jae 20f
+ decq %rcx
+2: movw %dx,(%rcx)
+ xorl %eax,%eax
+ ret
+20: decq %rcx
+ jmp bad_put_user
+ CFI_ENDPROC
+ENDPROC(__put_user_2)
+
+ENTRY(__put_user_4)
+ CFI_STARTPROC
+ GET_CURRENT(%r8)
+ addq $3,%rcx
+ jc 30f
+ cmpq tsk_arch_addr_limit(%r8),%rcx
+ jae 30f
+ subq $3,%rcx
+3: movl %edx,(%rcx)
+ xorl %eax,%eax
+ ret
+30: subq $3,%rcx
+ jmp bad_put_user
+ CFI_ENDPROC
+ENDPROC(__put_user_4)
+
+ENTRY(__put_user_8)
+ CFI_STARTPROC
+ GET_CURRENT(%r8)
+ addq $7,%rcx
+ jc 40f
+ cmpq tsk_arch_addr_limit(%r8),%rcx
+ jae 40f
+ subq $7,%rcx
+4: movq %rdx,(%rcx)
+ xorl %eax,%eax
+ ret
+40: subq $7,%rcx
+ jmp bad_put_user
+ CFI_ENDPROC
+ENDPROC(__put_user_8)
+
+bad_put_user:
+ CFI_STARTPROC
+ movq $(-EFAULT),%rax
+ ret
+ CFI_ENDPROC
+END(bad_put_user)
+
+.section __ex_table,"a"
+ .quad 1b,bad_put_user
+ .quad 2b,bad_put_user
+ .quad 3b,bad_put_user
+ .quad 4b,bad_put_user
+.previous
--- /dev/null
+ /*
+ * Save registers before calling assembly functions. This avoids
+ * disturbance of register allocation in some inline assembly constructs.
+ * Copyright 2001,2002 by Andi Kleen, SuSE Labs.
+ * Subject to the GNU public license, v.2. No warranty of any kind.
+ * $Id: thunk.S,v 1.2 2002/03/13 20:06:58 ak Exp $
+ */
+
+ #include <lwk/linkage.h>
+ #include <arch/dwarf2.h>
+ #include <arch/calling.h>
+ #include <arch/rwlock.h>
+
+ /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
+ .macro thunk name,func
+ .globl \name
+\name:
+ CFI_STARTPROC
+ SAVE_ARGS
+ call \func
+ jmp restore
+ CFI_ENDPROC
+ .endm
+
+ /* rdi: arg1 ... normal C conventions. rax is passed from C. */
+ .macro thunk_retrax name,func
+ .globl \name
+\name:
+ CFI_STARTPROC
+ SAVE_ARGS
+ call \func
+ jmp restore_norax
+ CFI_ENDPROC
+ .endm
+
+
+#if 0
+ .section .sched.text
+#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
+ thunk rwsem_down_read_failed_thunk,rwsem_down_read_failed
+ thunk rwsem_down_write_failed_thunk,rwsem_down_write_failed
+ thunk rwsem_wake_thunk,rwsem_wake
+ thunk rwsem_downgrade_thunk,rwsem_downgrade_wake
+#endif
+
+ thunk __down_failed,__down
+ thunk_retrax __down_failed_interruptible,__down_interruptible
+ thunk_retrax __down_failed_trylock,__down_trylock
+ thunk __up_wakeup,__up
+#endif
+
+ /* SAVE_ARGS below is used only for the .cfi directives it contains. */
+ CFI_STARTPROC
+ SAVE_ARGS
+restore:
+ RESTORE_ARGS
+ ret
+ CFI_ENDPROC
+
+ CFI_STARTPROC
+ SAVE_ARGS
+restore_norax:
+ RESTORE_ARGS 1
+ ret
+ CFI_ENDPROC
+
+/* Support for read/write spinlocks. */
+ .text
+/* rax: pointer to rwlock_t */
+ENTRY(__write_lock_failed)
+ lock
+ addl $RW_LOCK_BIAS,(%rax)
+1: rep
+ nop
+ cmpl $RW_LOCK_BIAS,(%rax)
+ jne 1b
+ lock
+ subl $RW_LOCK_BIAS,(%rax)
+ jnz __write_lock_failed
+ ret
+
+/* rax: pointer to rwlock_t */
+ENTRY(__read_lock_failed)
+ lock
+ incl (%rax)
+1: rep
+ nop
+ cmpl $1,(%rax)
+ js 1b
+ lock
+ decl (%rax)
+ js __read_lock_failed
+ ret
--- /dev/null
+/*
+ * User address space access functions.
+ *
+ * Copyright 1997 Andi Kleen <ak@muc.de>
+ * Copyright 1997 Linus Torvalds
+ * Copyright 2002 Andi Kleen <ak@suse.de>
+ */
+#include <arch/uaccess.h>
+
+/*
+ * Copy a null terminated string from userspace.
+ */
+
+#define __do_strncpy_from_user(dst,src,count,res) \
+do { \
+ long __d0, __d1, __d2; \
+ __asm__ __volatile__( \
+ " testq %1,%1\n" \
+ " jz 2f\n" \
+ "0: lodsb\n" \
+ " stosb\n" \
+ " testb %%al,%%al\n" \
+ " jz 1f\n" \
+ " decq %1\n" \
+ " jnz 0b\n" \
+ "1: subq %1,%0\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: movq %5,%0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 8\n" \
+ " .quad 0b,3b\n" \
+ ".previous" \
+ : "=r"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \
+ "=&D" (__d2) \
+ : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
+ : "memory"); \
+} while (0)
+
+long
+__strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ long res;
+ __do_strncpy_from_user(dst, src, count, res);
+ return res;
+}
+
+long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ long res = -EFAULT;
+ if (access_ok(VERIFY_READ, src, 1))
+ return __strncpy_from_user(dst, src, count);
+ return res;
+}
+
+/*
+ * Zero Userspace
+ */
+
+unsigned long __clear_user(void __user *addr, unsigned long size)
+{
+ long __d0;
+ /* no memory constraint because it doesn't change any memory gcc knows
+ about */
+ asm volatile(
+ " testq %[size8],%[size8]\n"
+ " jz 4f\n"
+ "0: movq %[zero],(%[dst])\n"
+ " addq %[eight],%[dst]\n"
+ " decl %%ecx ; jnz 0b\n"
+ "4: movq %[size1],%%rcx\n"
+ " testl %%ecx,%%ecx\n"
+ " jz 2f\n"
+ "1: movb %b[zero],(%[dst])\n"
+ " incq %[dst]\n"
+ " decl %%ecx ; jnz 1b\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: lea 0(%[size1],%[size8],8),%[size8]\n"
+ " jmp 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 8\n"
+ " .quad 0b,3b\n"
+ " .quad 1b,2b\n"
+ ".previous"
+ : [size8] "=c"(size), [dst] "=&D" (__d0)
+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
+ [zero] "r" (0UL), [eight] "r" (8UL));
+ return size;
+}
+
+unsigned long clear_user(void __user *to, unsigned long n)
+{
+ if (access_ok(VERIFY_WRITE, to, n))
+ return __clear_user(to, n);
+ return n;
+}
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 on exception, a value greater than N if too long
+ */
+
+long __strnlen_user(const char __user *s, long n)
+{
+ long res = 0;
+ char c;
+
+ while (1) {
+ if (res>n)
+ return n+1;
+ if (__get_user(c, s))
+ return 0;
+ if (!c)
+ return res+1;
+ res++;
+ s++;
+ }
+}
+
+long strnlen_user(const char __user *s, long n)
+{
+ if (!access_ok(VERIFY_READ, s, n))
+ return 0;
+ return __strnlen_user(s, n);
+}
+
+long strlen_user(const char __user *s)
+{
+ long res = 0;
+ char c;
+
+ for (;;) {
+ if (get_user(c, s))
+ return 0;
+ if (!c)
+ return res+1;
+ res++;
+ s++;
+ }
+}
+
+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
+{
+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
+ return copy_user_generic((__force void *)to, (__force void *)from, len);
+ }
+ return len;
+}
+
--- /dev/null
+obj-y := init.o fault.o phys_addr.o aspace.o
--- /dev/null
+/* Copyright (c) 2007,2008 Sandia National Laboratories */
+
+#include <lwk/kernel.h>
+#include <lwk/aspace.h>
+#include <lwk/task.h>
+#include <lwk/init_task.h>
+#include <arch/page.h> /* TODO: remove */
+#include <arch/pgtable.h> /* TODO: remove */
+#include <arch/page_table.h>
+
+
+/**
+ * Architecture specific address space initialization. This allocates a new
+ * page table root for the aspace and copies the kernel page tables into it.
+ */
+int
+arch_aspace_create(
+ struct aspace * aspace
+)
+{
+ unsigned int i;
+
+ /* Allocate a root page table for the address space */
+ if ((aspace->arch.pgd = kmem_get_pages(0)) == NULL)
+ return -ENOMEM;
+
+ /* Copy the current kernel page tables into the address space */
+ for (i = pgd_index(PAGE_OFFSET); i < PTRS_PER_PGD; i++)
+ aspace->arch.pgd[i] = bootstrap_task.aspace->arch.pgd[i];
+
+ return 0;
+}
+
+
+/**
+ * Architecture specific address space destruction. This frees all page table
+ * memory that the aspace was using.
+ */
+void
+arch_aspace_destroy(
+ struct aspace * aspace
+)
+{
+ unsigned int i, j, k;
+
+ xpte_t *pgd; /* Page Global Directory: level 0 (root of tree) */
+ xpte_t *pud; /* Page Upper Directory: level 1 */
+ xpte_t *pmd; /* Page Middle Directory: level 2 */
+ xpte_t *ptd; /* Page Table Directory: level 3 */
+
+ /* Walk and then free the Page Global Directory */
+ pgd = aspace->arch.pgd;
+ for (i = 0; i < pgd_index(PAGE_OFFSET); i++) {
+ if (!pgd[i].present)
+ continue;
+
+ /* Walk and then free the Page Upper Directory */
+ pud = __va(pgd[i].base_paddr << 12);
+ for (j = 0; j < 512; j++) {
+ if (!pud[j].present || pud[j].pagesize)
+ continue;
+
+ /* Walk and then free the Page Middle Directory */
+ pmd = __va(pud[j].base_paddr << 12);
+ for (k = 0; k < 512; k++) {
+ if (!pmd[k].present || pmd[k].pagesize)
+ continue;
+
+ /* Free the last level Page Table Directory */
+ ptd = __va(pmd[k].base_paddr << 12);
+ kmem_free_pages(ptd, 0);
+ }
+ kmem_free_pages(pmd, 0);
+ }
+ kmem_free_pages(pud, 0);
+ }
+ kmem_free_pages(pgd, 0);
+}
+
+
+/**
+ * Loads the address space object's root page table pointer into the calling
+ * CPU's CR3 register, causing the aspace to become active.
+ */
+void
+arch_aspace_activate(
+ struct aspace * aspace
+)
+{
+ asm volatile(
+ "movq %0,%%cr3" :: "r" (__pa(aspace->arch.pgd)) : "memory"
+ );
+}
+
+
+/**
+ * Allocates a new page table and links it to a parent page table entry.
+ */
+static xpte_t *
+alloc_page_table(
+ xpte_t * parent_pte
+)
+{
+ xpte_t *new_table;
+
+ new_table = kmem_get_pages(0);
+ if (!new_table)
+ return NULL;
+
+ if (parent_pte) {
+ xpte_t _pte;
+
+ memset(&_pte, 0, sizeof(_pte));
+ _pte.present = 1;
+ _pte.write = 1;
+ _pte.user = 1;
+ _pte.base_paddr = __pa(new_table) >> 12;
+
+ *parent_pte = _pte;
+ }
+
+ return new_table;
+}
+
+
+/**
+ * Locates an existing page table entry or creates a new one if none exists.
+ * Returns a pointer to the page table entry.
+ */
+static xpte_t *
+find_or_create_pte(
+ struct aspace * aspace,
+ vaddr_t vaddr,
+ vmpagesize_t pagesz
+)
+{
+ xpte_t *pgd; /* Page Global Directory: level 0 (root of tree) */
+ xpte_t *pud; /* Page Upper Directory: level 1 */
+ xpte_t *pmd; /* Page Middle Directory: level 2 */
+ xpte_t *ptd; /* Page Table Directory: level 3 */
+
+ xpte_t *pge; /* Page Global Directory Entry */
+ xpte_t *pue; /* Page Upper Directory Entry */
+ xpte_t *pme; /* Page Middle Directory Entry */
+ xpte_t *pte; /* Page Table Directory Entry */
+
+ /* Calculate indices into above directories based on vaddr specified */
+ const unsigned int pgd_index = (vaddr >> 39) & 0x1FF;
+ const unsigned int pud_index = (vaddr >> 30) & 0x1FF;
+ const unsigned int pmd_index = (vaddr >> 21) & 0x1FF;
+ const unsigned int ptd_index = (vaddr >> 12) & 0x1FF;
+
+ /* Traverse the Page Global Directory */
+ pgd = aspace->arch.pgd;
+ pge = &pgd[pgd_index];
+ if (!pge->present && !alloc_page_table(pge))
+ return NULL;
+
+ /* Traverse the Page Upper Directory */
+ pud = __va(pge->base_paddr << 12);
+ pue = &pud[pud_index];
+ if (pagesz == VM_PAGE_1GB)
+ return pue;
+ else if (!pue->present && !alloc_page_table(pue))
+ return NULL;
+ else if (pue->pagesize)
+ panic("BUG: Can't follow PUD entry, pagesize bit set.");
+
+ /* Traverse the Page Middle Directory */
+ pmd = __va(pue->base_paddr << 12);
+ pme = &pmd[pmd_index];
+ if (pagesz == VM_PAGE_2MB)
+ return pme;
+ else if (!pme->present && !alloc_page_table(pme))
+ return NULL;
+ else if (pme->pagesize)
+ panic("BUG: Can't follow PMD entry, pagesize bit set.");
+
+ /* Traverse the Page Table Entry Directory */
+ ptd = __va(pme->base_paddr << 12);
+ pte = &ptd[ptd_index];
+ return pte;
+}
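+
+/*
+ * Index split used above, shown for a hypothetical user address
+ * vaddr = 0x00007f8000201000:
+ *
+ *   pgd_index = (vaddr >> 39) & 0x1FF = 255   (bits 47..39)
+ *   pud_index = (vaddr >> 30) & 0x1FF = 0     (bits 38..30)
+ *   pmd_index = (vaddr >> 21) & 0x1FF = 1     (bits 29..21)
+ *   ptd_index = (vaddr >> 12) & 0x1FF = 1     (bits 20..12)
+ *
+ * A 4KB mapping walks all four levels, a 2MB mapping stops at the PMD
+ * entry, and a 1GB mapping stops at the PUD entry.
+ */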
+
+
+/**
+ * Examines a page table to determine if it has any active entries. If not,
+ * the page table is freed.
+ */
+static int
+try_to_free_table(
+ xpte_t * table,
+ xpte_t * parent_pte
+)
+{
+ int i;
+
+ /* Determine if the table can be freed */
+ for (i = 0; i < 512; i++) {
+ if (table[i].present)
+ return -1; /* Nope */
+ }
+
+ /* Yup, free the page table */
+ kmem_free_pages(table, 0);
+ memset(parent_pte, 0, sizeof(xpte_t));
+ return 0;
+}
+
+
+/**
+ * Zeros a page table entry. If the page table that the PTE was in becomes
+ * empty (contains no active mappings), it is freed. Page table freeing
+ * continues up to the top of the page table tree (e.g., a single call may
+ * result in a PTD, PMD, and PUD being freed; the PGD is never freed by this
+ * function).
+ */
+static void
+find_and_delete_pte(
+ struct aspace * aspace,
+ vaddr_t vaddr,
+ vmpagesize_t pagesz
+)
+{
+ xpte_t *pgd; /* Page Global Directory: level 0 (root of tree) */
+ xpte_t *pud; /* Page Upper Directory: level 1 */
+ xpte_t *pmd; /* Page Middle Directory: level 2 */
+ xpte_t *ptd; /* Page Table Directory: level 3 */
+
+ xpte_t *pge; /* Page Global Directory Entry */
+ xpte_t *pue; /* Page Upper Directory Entry */
+ xpte_t *pme; /* Page Middle Directory Entry */
+ xpte_t *pte; /* Page Table Directory Entry */
+
+ /* Calculate indices into above directories based on vaddr specified */
+ const unsigned int pgd_index = (vaddr >> 39) & 0x1FF;
+ const unsigned int pud_index = (vaddr >> 30) & 0x1FF;
+ const unsigned int pmd_index = (vaddr >> 21) & 0x1FF;
+ const unsigned int ptd_index = (vaddr >> 12) & 0x1FF;
+
+ /* Traverse the Page Global Directory */
+ pgd = aspace->arch.pgd;
+ pge = &pgd[pgd_index];
+ if (!pge->present)
+ return;
+
+ /* Traverse the Page Upper Directory */
+ pud = __va(pge->base_paddr << 12);
+ pue = &pud[pud_index];
+ if (!pue->present) {
+ return;
+ } else if (pagesz == VM_PAGE_1GB) {
+ if (!pue->pagesize)
+ panic("BUG: 1GB PTE has child page table attached.\n");
+
+ /* Unmap the 1GB page that this PTE was mapping */
+ memset(pue, 0, sizeof(xpte_t));
+
+ /* Try to free PUD that the PTE was in */
+ try_to_free_table(pud, pge);
+ return;
+ }
+
+ /* Traverse the Page Middle Directory */
+ pmd = __va(pue->base_paddr << 12);
+ pme = &pmd[pmd_index];
+ if (!pme->present) {
+ return;
+ } else if (pagesz == VM_PAGE_2MB) {
+ if (!pme->pagesize)
+ panic("BUG: 2MB PTE has child page table attached.\n");
+
+ /* Unmap the 2MB page that this PTE was mapping */
+ memset(pme, 0, sizeof(xpte_t));
+
+ /* Try to free the PMD that the PTE was in */
+ if (try_to_free_table(pmd, pue))
+ return; /* nope, couldn't free it */
+
+ /* Try to free the PUD that contained the PMD just freed */
+ try_to_free_table(pud, pge);
+ return;
+ }
+
+ /* Traverse the Page Table Entry Directory */
+ ptd = __va(pme->base_paddr << 12);
+ pte = &ptd[ptd_index];
+ if (!pte->present) {
+ return;
+ } else {
+ /* Unmap the 4KB page that this PTE was mapping */
+ memset(pte, 0, sizeof(xpte_t));
+
+ /* Try to free the PTD that the PTE was in */
+ if (try_to_free_table(ptd, pme))
+ return; /* nope, couldn't free it */
+
+ /* Try to free the PMD that contained the PTD just freed */
+ if (try_to_free_table(pmd, pue))
+ return; /* nope, couldn't free it */
+
+ /* Try to free the PUD that contained the PMD just freed */
+ try_to_free_table(pud, pge);
+ return;
+ }
+}
+
+
+/**
+ * Writes a new value to a PTE.
+ * TODO: Determine if this is atomic enough.
+ */
+static void
+write_pte(
+ xpte_t * pte,
+ paddr_t paddr,
+ vmflags_t flags,
+ vmpagesize_t pagesz
+)
+{
+ xpte_t _pte;
+ memset(&_pte, 0, sizeof(_pte));
+
+ _pte.present = 1;
+ if (flags & VM_WRITE)
+ _pte.write = 1;
+ if (flags & VM_USER)
+ _pte.user = 1;
+ if (flags & VM_GLOBAL)
+ _pte.global = 1;
+ if ((flags & VM_EXEC) == 0)
+ _pte.no_exec = 1;
+
+ if (pagesz == VM_PAGE_4KB) {
+ _pte.base_paddr = paddr >> 12;
+ } else if (pagesz == VM_PAGE_2MB) {
+ _pte.pagesize = 1;
+ _pte.base_paddr = paddr >> 21;
+ } else if (pagesz == VM_PAGE_1GB) {
+ _pte.pagesize = 1;
+ _pte.base_paddr = paddr >> 30;
+ } else {
+ panic("Invalid page size 0x%lx.", pagesz);
+ }
+
+ *pte = _pte;
+}
+
+
+/**
+ * Maps a page into an address space.
+ *
+ * Arguments:
+ * [IN] aspace: Address space to map page into.
+ * [IN] start: Address in aspace to map page to.
+ * [IN] paddr: Physical address of the page to map.
+ * [IN] flags: Protection and memory type flags.
+ * [IN] pagesz: Size of the page being mapped, in bytes.
+ *
+ * Returns:
+ * Success: 0
+ * Failure: Error Code, the page was not mapped.
+ */
+int
+arch_aspace_map_page(
+ struct aspace * aspace,
+ vaddr_t start,
+ paddr_t paddr,
+ vmflags_t flags,
+ vmpagesize_t pagesz
+)
+{
+ xpte_t *pte;
+
+ /* Locate page table entry that needs to be updated to map the page */
+ pte = find_or_create_pte(aspace, start, pagesz);
+ if (!pte)
+ return -ENOMEM;
+
+ /* Update the page table entry */
+ write_pte(pte, paddr, flags, pagesz);
+
+ return 0;
+}
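+
+/*
+ * Usage sketch (illustrative only; the function name, virtual address, and
+ * physical address below are made-up example values, and only the flags
+ * actually interpreted by write_pte() are used):
+ *
+ *   static int example_map_heap_page(struct aspace *as, paddr_t page_pa)
+ *   {
+ *           return arch_aspace_map_page(
+ *                   as,
+ *                   0x0000000040000000,            // 2 MB-aligned virtual addr
+ *                   page_pa,                       // 2 MB-aligned physical page
+ *                   VM_WRITE | VM_USER,
+ *                   VM_PAGE_2MB
+ *           );
+ *   }
+ */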
+
+
+/**
+ * Unmaps a page from an address space.
+ *
+ * Arguments:
+ * [IN] aspace: Address space to unmap page from.
+ * [IN] start: Address in aspace to unmap page from.
+ * [IN] pagesz: Size of the page to unmap.
+ */
+void
+arch_aspace_unmap_page(
+ struct aspace * aspace,
+ vaddr_t start,
+ vmpagesize_t pagesz
+)
+{
+ find_and_delete_pte(aspace, start, pagesz);
+}
+
+int
+arch_aspace_smartmap(struct aspace *src, struct aspace *dst,
+ vaddr_t start, size_t extent)
+{
+ size_t n = extent / SMARTMAP_ALIGN;
+ size_t i;
+ xpte_t *src_pgd = src->arch.pgd;
+ xpte_t *dst_pgd = dst->arch.pgd;
+ xpte_t *src_pge, *dst_pge;
+
+ /* Make sure all of the source PGD entries are present */
+ for (i = 0; i < n; i++) {
+ src_pge = &src_pgd[i];
+ if (!src_pge->present && !alloc_page_table(src_pge))
+ return -ENOMEM;
+ }
+
+ /* Perform the SMARTMAP... just copy src PGEs to the dst PGD */
+ for (i = 0; i < n; i++) {
+ src_pge = &src_pgd[i];
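+ /* The destination PGD slot is derived from the SMARTMAP virtual address */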
+ dst_pge = &dst_pgd[(start >> 39) & 0x1FF];
+ BUG_ON(dst_pge->present);
+ *dst_pge = *src_pge;
+ }
+
+ return 0;
+}
+
+int
+arch_aspace_unsmartmap(struct aspace *src, struct aspace *dst,
+ vaddr_t start, size_t extent)
+{
+ size_t n = extent / SMARTMAP_ALIGN;
+ size_t i;
+ xpte_t *dst_pgd = dst->arch.pgd;
+ xpte_t *dst_pge;
+
+ /* Unmap the SMARTMAP PGEs */
+ for (i = 0; i < n; i++) {
+ dst_pge = &dst_pgd[(start >> 39) & 0x1FF];
+ dst_pge->present = 0;
+ }
+
+ return 0;
+}
--- /dev/null
+#include <lwk/task.h>
+#include <lwk/signal.h>
+#include <lwk/ptrace.h>
+
+/**
+ * Determines if a signal is unhandled.
+ * Returns 1 if the signal is unhandled, 0 otherwise.
+ */
+int
+unhandled_signal(struct task_struct *tsk, int sig)
+{
+ if (is_init(tsk))
+ return 1;
+ if (tsk->ptrace & PT_PTRACED)
+ return 0;
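+ /* A signal is unhandled when no user handler is installed (SIG_IGN or SIG_DFL) */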
+ return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
+ (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
+}
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/bootmem.h>
+#include <lwk/string.h>
+#include <lwk/pmem.h>
+#include <arch/page.h>
+#include <arch/pgtable.h>
+#include <arch/e820.h>
+#include <arch/tlbflush.h>
+#include <arch/proto.h>
+
+/**
+ * Start and end page frame numbers of the kernel page tables.
+ */
+unsigned long __initdata table_start, table_end; /* page frame numbers */
+
+static __init void *early_ioremap(unsigned long addr, unsigned long size)
+{
+ unsigned long vaddr;
+ pmd_t *pmd, *last_pmd;
+ int i, pmds;
+
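+ /* Number of 2 MB PMD entries needed to cover [addr, addr + size) */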
+ pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
+ vaddr = __START_KERNEL_map;
+ pmd = level2_kernel_pgt;
+ last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
+ for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
+ for (i = 0; i < pmds; i++) {
+ if (pmd_present(pmd[i]))
+ goto next;
+ }
+ vaddr += addr & ~PMD_MASK;
+ addr &= PMD_MASK;
+ for (i = 0; i < pmds; i++, addr += PMD_SIZE)
+ set_pmd(pmd + i,__pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
+ __flush_tlb();
+ return (void *)vaddr;
+ next:
+ ;
+ }
+ printk("early_ioremap(0x%lx, %lu) failed\n", addr, size);
+ return NULL;
+}
+
+/* To avoid virtual aliases later */
+static __init void early_iounmap(void *addr, unsigned long size)
+{
+ unsigned long vaddr;
+ pmd_t *pmd;
+ int i, pmds;
+
+ vaddr = (unsigned long)addr;
+ pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
+ pmd = level2_kernel_pgt + pmd_index(vaddr);
+ for (i = 0; i < pmds; i++)
+ pmd_clear(pmd + i);
+ __flush_tlb();
+}
+
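+/**
+ * Allocates the next page from the region reserved for kernel page tables,
+ * zeroes it, and returns a temporary virtual mapping of it. The page's
+ * physical address is returned via *phys.
+ */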
+static __init void *alloc_low_page(unsigned long *phys)
+{
+ unsigned long pfn = table_end++;
+ void *adr;
+
+ if (pfn >= end_pfn)
+ panic("alloc_low_page: ran out of memory");
+
+ adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
+ memset(adr, 0, PAGE_SIZE);
+ *phys = pfn * PAGE_SIZE;
+ return adr;
+}
+
+/**
+ * Destroys a temporary mapping that was setup by alloc_low_page().
+ */
+static void __init unmap_low_page(void *adr)
+{
+ early_iounmap(adr, PAGE_SIZE);
+}
+
+/**
+ * Initializes a fixmap entry to point to a given physical page.
+ */
+void __init
+__set_fixmap(
+ enum fixed_addresses fixmap_index, /* fixmap entry index to setup */
+ unsigned long phys_addr, /* map fixmap entry to this addr */
+ pgprot_t prot /* page protection bits */
+)
+{
+ unsigned long virt_addr;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte, new_pte;
+
+ if (fixmap_index >= __end_of_fixed_addresses)
+ panic("Invalid FIXMAP index");
+
+ /* Calculate the virtual address of the fixmap entry */
+ virt_addr = __fix_to_virt(fixmap_index);
+
+ /* Look up PGD entry covering the fixmap entry */
+ pgd = pgd_offset_k(virt_addr);
+ if (pgd_none(*pgd))
+ panic("PGD FIXMAP MISSING, it should be setup in head.S!\n");
+
+ /* Look up the PUD entry covering the fixmap entry */
+ pud = pud_offset(pgd, virt_addr);
+ if (pud_none(*pud)) {
+ /* PUD entry is empty... allocate a new PMD directory for it */
+ pmd = (pmd_t *) alloc_bootmem_aligned(PAGE_SIZE, PAGE_SIZE);
+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
+ BUG_ON(pmd != pmd_offset(pud, 0));
+ }
+
+ /* Look up the PMD entry covering the fixmap entry */
+ pmd = pmd_offset(pud, virt_addr);
+ if (pmd_none(*pmd)) {
+ /* PMD entry is empty... allocate a new PTE directory for it */
+ pte = (pte_t *) alloc_bootmem_aligned(PAGE_SIZE, PAGE_SIZE);
+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
+ BUG_ON(pte != pte_offset_kernel(pmd, 0));
+ }
+
+ /*
+ * Construct and install a new PTE that maps the fixmap entry
+ * to the requested physical address.
+ */
+ pte = pte_offset_kernel(pmd, virt_addr);
+ new_pte = pfn_pte(phys_addr >> PAGE_SHIFT, prot);
+ set_pte(pte, new_pte);
+ __flush_tlb_one(virt_addr);
+}
+
+/**
+ * Finds enough space for the kernel page tables.
+ */
+static void __init
+find_early_table_space(unsigned long end)
+{
+ unsigned long puds; /* # of pud page tables needed */
+ unsigned long pmds; /* # of pmd page tables needed */
+ unsigned long tables; /* page table memory needed, in bytes */
+ unsigned long start; /* start address for kernel page tables */
+
+ /*
+ * The kernel page tables map memory using 2 MB pages.
+ * This means only puds and pmds are needed.
+ */
+ puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
+ pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+ tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
+ round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
+
+ /*
+ * Consult the memory map to find a region of suitable size.
+ */
+ start = 0x8000;
+ table_start = find_e820_area(start, end, tables);
+ if (table_start == -1UL)
+ panic("Cannot find space for the kernel page tables");
+
+ /*
+ * Store table_start and table_end as page frame numbers.
+ * table_end starts out as the same as table_start.
+ * It will be incremented for each page table allocated.
+ */
+ table_start >>= PAGE_SHIFT;
+ table_end = table_start;
+}
+
+
+/**
+ * Configures the input Page Middle Directory to map physical addresses
+ * [address, end). PMD entries outside of this range are zeroed.
+ *
+ * Each PMD table maps 1 GB of memory (512 entries, each mapping 2 MB).
+ */
+static void __init
+phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
+{
+ int i;
+
+ for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
+ unsigned long entry;
+
+ if (address >= end) {
+ for (; i < PTRS_PER_PMD; i++, pmd++)
+ set_pmd(pmd, __pmd(0));
+ break;
+ }
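+ /* Map this 2 MB region with a large kernel page (global, non-executable) */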
+ entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
+ entry &= __supported_pte_mask;
+ set_pmd(pmd, __pmd(entry));
+ }
+}
+
+/**
+ * Configures the input Page Upper Directory to map physical addresses
+ * [address, end). PUD entries outside of this range are zeroed.
+ *
+ * Each PUD table maps 512 GB of memory (512 entries, each pointing to a PMD).
+ */
+static void __init
+phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+{
+ long i = pud_index(address);
+
+ pud = pud + i;
+
+ for (; i < PTRS_PER_PUD; pud++, i++) {
+ unsigned long paddr, pmd_phys;
+ pmd_t *pmd;
+
+ paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
+ if (paddr >= end)
+ break;
+
+ if (!e820_any_mapped(paddr, paddr+PUD_SIZE, 0)) {
+ set_pud(pud, __pud(0));
+ continue;
+ }
+
+ pmd = alloc_low_page(&pmd_phys);
+ set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
+ phys_pmd_init(pmd, paddr, end);
+ unmap_low_page(pmd);
+ }
+ __flush_tlb();
+}
+
+/**
+ * Sets up the initial kernel page tables. These map all of physical memory
+ * (0 to the top of physical memory) starting at virtual address PAGE_OFFSET.
+ * This runs before bootmem is initialized and therefore has to get pages
+ * directly from physical memory.
+ */
+void __init
+init_kernel_pgtables(unsigned long start, unsigned long end)
+{
+ unsigned long next;
+
+ /*
+ * Find a contiguous region of memory large enough to hold the
+ * kernel page tables.
+ */
+ find_early_table_space(end);
+
+ /*
+ * Calculate the start and end kernel virtual addresses
+ * corresponding to the input physical address range.
+ */
+ start = (unsigned long)__va(start);
+ end = (unsigned long)__va(end);
+
+ for (; start < end; start = next) {
+ unsigned long pud_phys;
+ pud_t *pud;
+
+ /*
+ * Allocate a new page for the Page Upper Directory.
+ *
+ * pud = kernel virtual address where the new
+ * page can be accessed.
+ * pud_phys = physical address of the new page.
+ * map = cookie needed to free the temporary mapping.
+ */
+ pud = alloc_low_page(&pud_phys);
+
+ /*
+ * Calculate the upper bound address for the PUD.
+ * The PUD maps [start, next).
+ */
+ next = start + PGDIR_SIZE;
+ if (next > end)
+ next = end;
+
+ /*
+ * Initialize the new PUD.
+ * phys_pud_init internally calls phys_pmd_init for
+ * each entry in the PUD.
+ */
+ phys_pud_init(pud, __pa(start), __pa(next));
+
+ /*
+ * Point the Page Global Directory at the new PUD.
+ * The PGD is the root of the page tables.
+ */
+ set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
+
+ /* Destroy the temporary kernel mapping of the new PUD */
+ unmap_low_page(pud);
+ }
+
+ asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
+ __flush_tlb_all();
+
+ printk(KERN_DEBUG
+ "Allocated %lu KB for kernel page tables [0x%lx - 0x%lx)\n",
+ ((table_end - table_start) << PAGE_SHIFT) / 1024,
+ table_start << PAGE_SHIFT,
+ table_end << PAGE_SHIFT);
+}
+
+/**
+ * This performs architecture-specific memory subsystem initialization. It is
+ * called from the platform-independent memsys_init(). For x86_64, the only
+ * thing that needs to be done is to relocate the initrd image to user memory.
+ */
+void __init
+arch_memsys_init(size_t kmem_size)
+{
+ struct pmem_region query, result;
+ size_t initrd_size, umem_size;
+
+ if (!initrd_start)
+ return;
+
+ initrd_size = initrd_end - initrd_start;
+ umem_size = round_up(initrd_size, PAGE_SIZE);
+
+ /* Relocate the initrd image to an unallocated chunk of umem */
+ if (pmem_alloc_umem(umem_size, PAGE_SIZE, &result))
+ panic("Failed to allocate umem for initrd image.");
+ result.type = PMEM_TYPE_INITRD;
+ pmem_update(&result);
+ memmove(__va(result.start), __va(initrd_start), initrd_size);
+
+ /* Free the memory used by the old initrd location */
+ pmem_region_unset_all(&query);
+ query.start = round_down( initrd_start, PAGE_SIZE );
+ query.end = round_up( initrd_end, PAGE_SIZE );
+ while (pmem_query(&query, &result) == 0) {
+ result.type = (result.start < kmem_size) ? PMEM_TYPE_KMEM
+ : PMEM_TYPE_UMEM;
+ result.allocated = false;
+ BUG_ON(pmem_update(&result));
+ query.start = result.end;
+ }
+
+ /* Update initrd_start and initrd_end to their new values */
+ initrd_start = result.start;
+ initrd_end = initrd_start + initrd_size;
+}
+
--- /dev/null
+#include <arch/page.h>
+
+extern unsigned long phys_base;
+
+/**
+ * This converts a kernel virtual address to a physical address.
+ *
+ * NOTE: This function only works for kernel virtual addresses in the kernel's
+ * identity mapping of all of physical memory. It will not work for the
+ * fixmap, vmalloc() areas, or any other type of virtual address.
+ */
+unsigned long
+__phys_addr(unsigned long virt_addr)
+{
+ /* Handle kernel symbols */
+ if (virt_addr >= __START_KERNEL_map)
+ return virt_addr - __START_KERNEL_map + phys_base;
+ /* Handle kernel data */
+ return virt_addr - PAGE_OFFSET;
+}
+
--- /dev/null
+obj-y += console/
--- /dev/null
+obj-$(CONFIG_VGA_CONSOLE) += vga.o
+obj-$(CONFIG_SERIAL_CONSOLE) += serial.o
+obj-$(CONFIG_RCAL0_CONSOLE) += rcal0.o
--- /dev/null
+#include <lwk/driver.h>
+#include <lwk/console.h>
+#include <lwk/string.h>
+#include <lwk/delay.h>
+#include <rca/rca_l0.h>
+
+/** Template event header with static fields filled in. */
+static rs_event_t ev_hdr = {0};
+
+/** Set when L0 console has been initialized. */
+static int initialized = 0;
+
+/**
+ * Writes a message to the Cray L0 console.
+ */
+static void l0_write(struct console *con, const char *str)
+{
+ int ret = 0;
+ unsigned int n = strlen(str);
+
+ while (n) {
+ if ((ret =
+ ch_send_data(L0RCA_CH_CON_UP, &ev_hdr, (void *)str, n))
+ <= 0)
+ {
+ /* either error or we are done */
+ break;
+ }
+
+ if (n > ret) {
+ /* some bytes were sent, point to the remaining data */
+ str += (n - ret);
+ n = ret;
+ }
+
+ /* busy wait if the buf is full */
+ udelay(1000);
+ }
+
+ /* if error, give up and spin forever */
+ if (ret < 0)
+ while (1) {}
+
+ return;
+}
+
+/**
+ * Cray L0 console device.
+ */
+static struct console l0_console = {
+ .name = "Cray RCA L0 Console",
+ .write = l0_write
+};
+
+/**
+ * Initializes the Cray XT L0 console.
+ */
+void l0_console_init(void)
+{
+ if (initialized) {
+ printk(KERN_ERR "RCA L0 console already initialized.\n");
+ return;
+ }
+
+ /* Read the configuration information provided by the L0 */
+ l0rca_init_config();
+
+ /* Setup the event template to use for outgoing events */
+ ev_hdr.ev_id = ec_console_log;
+ ev_hdr.ev_gen = RCA_MKSVC(
+ RCA_INST_ANY,
+ RCA_SVCTYPE_CONS,
+ l0rca_get_proc_id()
+ );
+ ev_hdr.ev_src = ev_hdr.ev_gen;
+ ev_hdr.ev_priority = RCA_LOG_DEBUG;
+ ev_hdr.ev_flag = 0;
+ /* Timestamp, len & data are filled in at the time the event is sent */
+
+ /* Register with the Cray RCA subsystem */
+ register_ch_up(L0RCA_CH_CON_UP, NULL, 0, -1);
+
+ /* Register the L0 console with the LWK */
+ console_register(&l0_console);
+ initialized = 1;
+}
+
+driver_init(l0_console_init);
+
--- /dev/null
+#include <lwk/driver.h>
+#include <lwk/console.h>
+#include <lwk/interrupt.h>
+#include <arch/io.h>
+
+// Serial port registers
+#define TXB 0 // Transmitter Holding Buffer W
+#define RXB 0 // Receiver Buffer R
+#define DLL 0 // Divisor Latch Low Byte R/W
+#define IER 1 // Interrupt Enable Register R/W
+#define DLH 1 // Divisor Latch High Byte R/W
+#define IIR 2 // Interrupt Identification Register R
+#define FCR 2 // FIFO Control Register W
+#define LCR 3 // Line Control Register R/W
+#define MCR 4 // Modem Control Register R/W
+#define LSR 5 // Line Status Register R
+#define MSR 6 // Modem Status Register R
+#define SCR 7 // Scratch Register R/W
+
+// IER bits
+#define IER_RLSI 0x08 // RX interrupt
+#define IER_THRI 0x02 // TX interrupt
+#define IER_RDI 0x01 // Receiver data interrupt
+
+// MCR bits
+#define MCR_DTR 0x01 // Enable DTR
+#define MCR_RTS 0x02 // Enable RTS
+#define MCR_OUT2 0x08 // Enable Out2 (?)
+
+// LSR bits
+#define LSR_TXEMPT 0x20 // Empty TX holding register
+#define LSR_RXRDY 0x01 // Ready to receive
+
+// LCR bits
+#define LCR_DLAB 0x80 // Divisor latch access bit
+
+/** IO port address of the serial port. */
+static unsigned int port = 0x3F8; // COM1
+
+/** Serial port baud rate. */
+static unsigned int baud = 9600;
+#define SERIAL_MAX_BAUD 115200
+
+/** Set when serial console has been initialized. */
+static int initialized = 0;
+
+/**
+ * Prints a single character to the serial port.
+ */
+static void serial_putc(unsigned char c)
+{
+ // Wait until the TX buffer is empty
+ while ((inb_p(port + LSR) & LSR_TXEMPT) == 0)
+ ;
+ // Slam the 8 bits down the 1 bit pipe... meeeooowwwy!
+ outb(c, port);
+}
+
+/**
+ * Writes a string to the serial port.
+ */
+static void serial_write(struct console *con, const char *str)
+{
+ unsigned char c;
+ while ((c = *str++) != '\0') {
+ serial_putc(c);
+ if (c == '\n') // new line
+ serial_putc('\r'); // tack on carriage return
+ }
+}
+
+/**
+ * Serial port console device.
+ */
+static struct console serial_console = {
+ .name = "Serial Console",
+ .write = serial_write
+};
+
+/**
+ * Initializes and registers the serial console driver.
+ */
+void serial_console_init(void)
+{
+ // Setup the divisor latch registers for the specified baud rate
+ unsigned int div = SERIAL_MAX_BAUD / baud;
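+ // (e.g., 115200 / 9600 = 12 for the default 9600 baud)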
+
+ if (initialized) {
+ printk(KERN_ERR "Serial console already initialized.\n");
+ return;
+ }
+
+ outb( inb(port+LCR) | LCR_DLAB , port+LCR ); // set DLAB
+ outb( (div>>0) & 0xFF , port+DLL ); // set divisor low byte
+ outb( (div>>8) & 0xFF , port+DLH ); // set divisor high byte
+ outb( inb(port+LCR) & ~LCR_DLAB , port+LCR ); // unset DLAB
+
+ outb( 0x0 , port+IER ); // Disable serial port interrupts
+ outb( 0x0 , port+FCR ); // Don't use the FIFOs
+ outb( 0x3 , port+LCR ); // 8n1
+
+ // Setup modem control register
+ outb( MCR_RTS | MCR_DTR | MCR_OUT2 , port+MCR);
+
+ console_register(&serial_console);
+ initialized = 1;
+}
+
+driver_init(serial_console_init);
+
+/**
+ * Configurable parameters for controlling the serial port
+ * I/O port address and baud.
+ */
+driver_param(port, uint);
+driver_param(baud, uint);
+
--- /dev/null
+#include <lwk/driver.h>
+#include <lwk/console.h>
+#include <lwk/string.h>
+
+/** Base address of the VGA frame buffer. */
+static volatile uint8_t * const vga_fb = (uint8_t *) 0xffffffff800b8000ul;
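+/* Each character cell in the frame buffer is two bytes: character then attribute. */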
+
+/** Current cursor row coordinate. */
+static int row = 0;
+
+/** Current cursor column coordinate. */
+static int col = 0;
+
+/** Number of rows on the screen. */
+static int nrows = 25;
+
+/** Number of columns on the screen. */
+static int ncols = 80;
+
+/** Set when vga console has been initialized. */
+static int initialized = 0;
+
+/** Calculates the offset in the vga_fb corresponding to (row, col). */
+static inline int cursor(int row, int col)
+{
+ return (row * ncols * 2) + col * 2;
+}
+
+/**
+ * Scrolls everything on the screen up by one row.
+ */
+static void vga_scroll(void)
+{
+ int i;
+
+ // Move all existing lines up by one
+ memmove(
+ (void *) vga_fb,
+ (void *) (vga_fb + cursor(1, 0)),
+ (nrows - 1) * ncols * sizeof(uint16_t)
+ );
+
+ // Blank the new line at the bottom of the screen
+ for (i = 0; i < ncols; i++)
+ vga_fb[cursor(nrows-1, i)] = ' ';
+}
+
+/**
+ * Moves cursor to the next line.
+ */
+static void vga_newline(void)
+{
+ row = row + 1;
+ col = 0;
+
+ if (row == nrows) {
+ row = nrows - 1;
+ vga_scroll();
+ }
+}
+
+/**
+ * Sets the VGA font color.
+ */
+static void vga_set_font_color(uint8_t color)
+{
+ int i, j;
+ for (i = 0; i < nrows; i++)
+ for (j = 0; j < ncols; j++)
+ vga_fb[cursor(i, j) + 1] = color;
+}
+
+/**
+ * Prints a single character to the screen.
+ */
+static void vga_putc(unsigned char c)
+{
+ // Print the character
+ vga_fb[cursor(row, col)] = c;
+
+ // Move cursor
+ if (++col == ncols)
+ vga_newline();
+}
+
+/**
+ * Writes a string to the screen at the current cursor location.
+ */
+static void vga_write(struct console *con, const char *str)
+{
+ unsigned char c;
+
+ while ((c = *str++) != '\0') {
+ switch (c) {
+ case '\n':
+ vga_newline();
+ break;
+
+ case '\t':
+ /* Emulate a TAB */
+ vga_putc(' ');
+ while ((col % 8) != 0)
+ vga_putc(' ');
+ break;
+
+ default:
+ vga_putc(c);
+ }
+ }
+}
+
+/**
+ * VGA console device.
+ */
+static struct console vga_console = {
+ .name = "VGA Console",
+ .write = vga_write
+};
+
+/**
+ * Initializes and registers the VGA console driver.
+ */
+void vga_console_init(void)
+{
+ if (initialized) {
+ printk(KERN_ERR "VGA console already initialized.\n");
+ return;
+ }
+
+ vga_set_font_color(0x0F /* White */);
+ console_register(&vga_console);
+ initialized = 1;
+}
+
+driver_init(vga_console_init);
+
+/**
+ * Sets the row on the screen to start printing at.
+ * This is used to avoid overwriting BIOS/boot messages.
+ * At least on x86-64, this is set automatically as part
+ * of the bootstrap process.
+ */
+driver_param(row, int);
+
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acconfig.h - Global configuration constants
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef _ACCONFIG_H
+#define _ACCONFIG_H
+
+/******************************************************************************
+ *
+ * Configuration options
+ *
+ *****************************************************************************/
+
+/*
+ * ACPI_DEBUG_OUTPUT - This switch enables all the debug facilities of the
+ * ACPI subsystem. This includes the DEBUG_PRINT output
+ * statements. When disabled, all DEBUG_PRINT
+ * statements are compiled out.
+ *
+ * ACPI_APPLICATION - Use this switch if the subsystem is going to be run
+ * at the application level.
+ *
+ */
+
+/* Current ACPICA subsystem version in YYYYMMDD format */
+
+#define ACPI_CA_VERSION 0x20060707
+
+/*
+ * OS name, used for the _OS object. The _OS object is essentially obsolete,
+ * but there is a large base of ASL/AML code in existing machines that check
+ * for the string below. The use of this string usually guarantees that
+ * the ASL will execute down the most tested code path. Also, there is some
+ * code that will not execute the _OSI method unless _OS matches the string
+ * below. Therefore, change this string at your own risk.
+ */
+#define ACPI_OS_NAME "Microsoft Windows NT"
+
+/* Maximum objects in the various object caches */
+
+#define ACPI_MAX_STATE_CACHE_DEPTH 96 /* State objects */
+#define ACPI_MAX_PARSE_CACHE_DEPTH 96 /* Parse tree objects */
+#define ACPI_MAX_EXTPARSE_CACHE_DEPTH 96 /* Parse tree objects */
+#define ACPI_MAX_OBJECT_CACHE_DEPTH 96 /* Interpreter operand objects */
+#define ACPI_MAX_NAMESPACE_CACHE_DEPTH 96 /* Namespace objects */
+
+/*
+ * Should the subsystem abort the loading of an ACPI table if the
+ * table checksum is incorrect?
+ */
+#define ACPI_CHECKSUM_ABORT FALSE
+
+/******************************************************************************
+ *
+ * Subsystem Constants
+ *
+ *****************************************************************************/
+
+/* Version of ACPI supported */
+
+#define ACPI_CA_SUPPORT_LEVEL 3
+
+/* Maximum count for a semaphore object */
+
+#define ACPI_MAX_SEMAPHORE_COUNT 256
+
+/* Maximum object reference count (detects object deletion issues) */
+
+#define ACPI_MAX_REFERENCE_COUNT 0x800
+
+/* Size of cached memory mapping for system memory operation region */
+
+#define ACPI_SYSMEM_REGION_WINDOW_SIZE 4096
+
+/* owner_id tracking. 8 entries allows for 255 owner_ids */
+
+#define ACPI_NUM_OWNERID_MASKS 8
+
+/******************************************************************************
+ *
+ * ACPI Specification constants (Do not change unless the specification changes)
+ *
+ *****************************************************************************/
+
+/* Number of distinct GPE register blocks and register width */
+
+#define ACPI_MAX_GPE_BLOCKS 2
+#define ACPI_GPE_REGISTER_WIDTH 8
+
+/* Method info (in WALK_STATE), containing local variables and arguments */
+
+#define ACPI_METHOD_NUM_LOCALS 8
+#define ACPI_METHOD_MAX_LOCAL 7
+
+#define ACPI_METHOD_NUM_ARGS 7
+#define ACPI_METHOD_MAX_ARG 6
+
+/* Length of _HID, _UID, _CID, and UUID values */
+
+#define ACPI_DEVICE_ID_LENGTH 0x09
+#define ACPI_MAX_CID_LENGTH 48
+#define ACPI_UUID_LENGTH 16
+
+/*
+ * Operand Stack (in WALK_STATE), Must be large enough to contain METHOD_MAX_ARG
+ */
+#define ACPI_OBJ_NUM_OPERANDS 8
+#define ACPI_OBJ_MAX_OPERAND 7
+
+/* Names within the namespace are 4 bytes long */
+
+#define ACPI_NAME_SIZE 4
+#define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */
+#define ACPI_PATH_SEPARATOR '.'
+
+/* Constants used in searching for the RSDP in low memory */
+
+#define ACPI_EBDA_PTR_LOCATION 0x0000040E /* Physical Address */
+#define ACPI_EBDA_PTR_LENGTH 2
+#define ACPI_EBDA_WINDOW_SIZE 1024
+#define ACPI_HI_RSDP_WINDOW_BASE 0x000E0000 /* Physical Address */
+#define ACPI_HI_RSDP_WINDOW_SIZE 0x00020000
+#define ACPI_RSDP_SCAN_STEP 16
+
+/* Operation regions */
+
+#define ACPI_NUM_PREDEFINED_REGIONS 8
+#define ACPI_USER_REGION_BEGIN 0x80
+
+/* Maximum space_ids for Operation Regions */
+
+#define ACPI_MAX_ADDRESS_SPACE 255
+
+/* Array sizes. Used for range checking also */
+
+#define ACPI_MAX_MATCH_OPCODE 5
+
+/* RSDP checksums */
+
+#define ACPI_RSDP_CHECKSUM_LENGTH 20
+#define ACPI_RSDP_XCHECKSUM_LENGTH 36
+
+/* SMBus bidirectional buffer size */
+
+#define ACPI_SMBUS_BUFFER_SIZE 34
+
+/******************************************************************************
+ *
+ * ACPI AML Debugger
+ *
+ *****************************************************************************/
+
+#define ACPI_DEBUGGER_MAX_ARGS 8 /* Must be max method args + 1 */
+
+#define ACPI_DEBUGGER_COMMAND_PROMPT '-'
+#define ACPI_DEBUGGER_EXECUTE_PROMPT '%'
+
+#endif /* _ACCONFIG_H */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acdebug.h - ACPI/AML debugger
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACDEBUG_H__
+#define __ACDEBUG_H__
+
+#define ACPI_DEBUG_BUFFER_SIZE 4196
+
+struct command_info {
+ char *name; /* Command Name */
+ u8 min_args; /* Minimum arguments required */
+};
+
+struct argument_info {
+ char *name; /* Argument Name */
+};
+
+#define PARAM_LIST(pl) pl
+#define DBTEST_OUTPUT_LEVEL(lvl) if (acpi_gbl_db_opt_verbose)
+#define VERBOSE_PRINT(fp) DBTEST_OUTPUT_LEVEL(lvl) {\
+ acpi_os_printf PARAM_LIST(fp);}
+
+#define EX_NO_SINGLE_STEP 1
+#define EX_SINGLE_STEP 2
+
+/*
+ * dbxface - external debugger interfaces
+ */
+acpi_status acpi_db_initialize(void);
+
+void acpi_db_terminate(void);
+
+acpi_status
+acpi_db_single_step(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op, u32 op_type);
+
+/*
+ * dbcmds - debug commands and output routines
+ */
+acpi_status acpi_db_disassemble_method(char *name);
+
+void acpi_db_display_table_info(char *table_arg);
+
+void acpi_db_unload_acpi_table(char *table_arg, char *instance_arg);
+
+void
+acpi_db_set_method_breakpoint(char *location,
+ struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op);
+
+void acpi_db_set_method_call_breakpoint(union acpi_parse_object *op);
+
+void acpi_db_get_bus_info(void);
+
+void acpi_db_disassemble_aml(char *statements, union acpi_parse_object *op);
+
+void acpi_db_dump_namespace(char *start_arg, char *depth_arg);
+
+void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg);
+
+void acpi_db_send_notify(char *name, u32 value);
+
+void acpi_db_set_method_data(char *type_arg, char *index_arg, char *value_arg);
+
+acpi_status
+acpi_db_display_objects(char *obj_type_arg, char *display_count_arg);
+
+acpi_status acpi_db_find_name_in_namespace(char *name_arg);
+
+void acpi_db_set_scope(char *name);
+
+acpi_status acpi_db_sleep(char *object_arg);
+
+void acpi_db_find_references(char *object_arg);
+
+void acpi_db_display_locks(void);
+
+void acpi_db_display_resources(char *object_arg);
+
+void acpi_db_display_gpes(void);
+
+void acpi_db_check_integrity(void);
+
+void acpi_db_generate_gpe(char *gpe_arg, char *block_arg);
+
+/*
+ * dbdisply - debug display commands
+ */
+void acpi_db_display_method_info(union acpi_parse_object *op);
+
+void acpi_db_decode_and_display_object(char *target, char *output_type);
+
+void
+acpi_db_display_result_object(union acpi_operand_object *obj_desc,
+ struct acpi_walk_state *walk_state);
+
+acpi_status acpi_db_display_all_methods(char *display_count_arg);
+
+void acpi_db_display_arguments(void);
+
+void acpi_db_display_locals(void);
+
+void acpi_db_display_results(void);
+
+void acpi_db_display_calling_tree(void);
+
+void acpi_db_display_object_type(char *object_arg);
+
+void
+acpi_db_display_argument_object(union acpi_operand_object *obj_desc,
+ struct acpi_walk_state *walk_state);
+
+/*
+ * dbexec - debugger control method execution
+ */
+void acpi_db_execute(char *name, char **args, u32 flags);
+
+void
+acpi_db_create_execution_threads(char *num_threads_arg,
+ char *num_loops_arg, char *method_name_arg);
+
+/*
+ * dbfileio - Debugger file I/O commands
+ */
+acpi_object_type
+acpi_db_match_argument(char *user_argument, struct argument_info *arguments);
+
+void acpi_db_close_debug_file(void);
+
+void acpi_db_open_debug_file(char *name);
+
+acpi_status acpi_db_load_acpi_table(char *filename);
+
+acpi_status
+acpi_db_get_table_from_file(char *filename, struct acpi_table_header **table);
+
+acpi_status
+acpi_db_read_table_from_file(char *filename, struct acpi_table_header **table);
+
+/*
+ * dbhistry - debugger HISTORY command
+ */
+void acpi_db_add_to_history(char *command_line);
+
+void acpi_db_display_history(void);
+
+char *acpi_db_get_from_history(char *command_num_arg);
+
+/*
+ * dbinput - user front-end to the AML debugger
+ */
+acpi_status
+acpi_db_command_dispatch(char *input_buffer,
+ struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op);
+
+void ACPI_SYSTEM_XFACE acpi_db_execute_thread(void *context);
+
+/*
+ * dbstats - Generation and display of ACPI table statistics
+ */
+void acpi_db_generate_statistics(union acpi_parse_object *root, u8 is_method);
+
+acpi_status acpi_db_display_statistics(char *type_arg);
+
+/*
+ * dbutils - AML debugger utilities
+ */
+void acpi_db_set_output_destination(u32 where);
+
+void acpi_db_dump_external_object(union acpi_object *obj_desc, u32 level);
+
+void acpi_db_prep_namestring(char *name);
+
+struct acpi_namespace_node *acpi_db_local_ns_lookup(char *name);
+
+#endif /* __ACDEBUG_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acdisasm.h - AML disassembler
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACDISASM_H__
+#define __ACDISASM_H__
+
+#include "amlresrc.h"
+
+#define BLOCK_NONE 0
+#define BLOCK_PAREN 1
+#define BLOCK_BRACE 2
+#define BLOCK_COMMA_LIST 4
+#define ACPI_DEFAULT_RESNAME *(u32 *) "__RD"
+
+struct acpi_external_list {
+ char *path;
+ char *internal_path;
+ struct acpi_external_list *next;
+ u32 value;
+ u16 length;
+ u8 type;
+};
+
+extern struct acpi_external_list *acpi_gbl_external_list;
+
+typedef const struct acpi_dmtable_info {
+ u8 opcode;
+ u8 offset;
+ char *name;
+
+} acpi_dmtable_info;
+
+/*
+ * Values for Opcode above.
+ * Note: 0-7 must not change, used as a flag shift value
+ */
+#define ACPI_DMT_FLAG0 0
+#define ACPI_DMT_FLAG1 1
+#define ACPI_DMT_FLAG2 2
+#define ACPI_DMT_FLAG3 3
+#define ACPI_DMT_FLAG4 4
+#define ACPI_DMT_FLAG5 5
+#define ACPI_DMT_FLAG6 6
+#define ACPI_DMT_FLAG7 7
+#define ACPI_DMT_FLAGS0 8
+#define ACPI_DMT_FLAGS2 9
+#define ACPI_DMT_UINT8 10
+#define ACPI_DMT_UINT16 11
+#define ACPI_DMT_UINT24 12
+#define ACPI_DMT_UINT32 13
+#define ACPI_DMT_UINT56 14
+#define ACPI_DMT_UINT64 15
+#define ACPI_DMT_STRING 16
+#define ACPI_DMT_NAME4 17
+#define ACPI_DMT_NAME6 18
+#define ACPI_DMT_NAME8 19
+#define ACPI_DMT_CHKSUM 20
+#define ACPI_DMT_SPACEID 21
+#define ACPI_DMT_GAS 22
+#define ACPI_DMT_MADT 23
+#define ACPI_DMT_SRAT 24
+#define ACPI_DMT_EXIT 25
+
+typedef
+void (*ACPI_TABLE_HANDLER) (struct acpi_table_header * table);
+
+struct acpi_dmtable_data {
+ char *signature;
+ struct acpi_dmtable_info *table_info;
+ ACPI_TABLE_HANDLER table_handler;
+};
+
+struct acpi_op_walk_info {
+ u32 level;
+ u32 last_level;
+ u32 count;
+ u32 bit_offset;
+ u32 flags;
+ struct acpi_walk_state *walk_state;
+};
+
+typedef
+acpi_status(*asl_walk_callback) (union acpi_parse_object * op,
+ u32 level, void *context);
+
+struct acpi_resource_tag {
+ u32 bit_index;
+ char *tag;
+};
+
+/* Strings used for decoding flags to ASL keywords */
+
+extern const char *acpi_gbl_word_decode[];
+extern const char *acpi_gbl_irq_decode[];
+extern const char *acpi_gbl_lock_rule[];
+extern const char *acpi_gbl_access_types[];
+extern const char *acpi_gbl_update_rules[];
+extern const char *acpi_gbl_match_ops[];
+
+extern struct acpi_dmtable_info acpi_dm_table_info_asf0[];
+extern struct acpi_dmtable_info acpi_dm_table_info_asf1[];
+extern struct acpi_dmtable_info acpi_dm_table_info_asf2[];
+extern struct acpi_dmtable_info acpi_dm_table_info_asf3[];
+extern struct acpi_dmtable_info acpi_dm_table_info_asf4[];
+extern struct acpi_dmtable_info acpi_dm_table_info_asf_hdr[];
+extern struct acpi_dmtable_info acpi_dm_table_info_boot[];
+extern struct acpi_dmtable_info acpi_dm_table_info_cpep[];
+extern struct acpi_dmtable_info acpi_dm_table_info_cpep0[];
+extern struct acpi_dmtable_info acpi_dm_table_info_dbgp[];
+extern struct acpi_dmtable_info acpi_dm_table_info_ecdt[];
+extern struct acpi_dmtable_info acpi_dm_table_info_facs[];
+extern struct acpi_dmtable_info acpi_dm_table_info_fadt1[];
+extern struct acpi_dmtable_info acpi_dm_table_info_fadt2[];
+extern struct acpi_dmtable_info acpi_dm_table_info_gas[];
+extern struct acpi_dmtable_info acpi_dm_table_info_header[];
+extern struct acpi_dmtable_info acpi_dm_table_info_hpet[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt0[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt1[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt2[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt3[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt4[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt5[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt6[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt7[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt8[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt_hdr[];
+extern struct acpi_dmtable_info acpi_dm_table_info_mcfg[];
+extern struct acpi_dmtable_info acpi_dm_table_info_mcfg0[];
+extern struct acpi_dmtable_info acpi_dm_table_info_rsdp1[];
+extern struct acpi_dmtable_info acpi_dm_table_info_rsdp2[];
+extern struct acpi_dmtable_info acpi_dm_table_info_sbst[];
+extern struct acpi_dmtable_info acpi_dm_table_info_slit[];
+extern struct acpi_dmtable_info acpi_dm_table_info_spcr[];
+extern struct acpi_dmtable_info acpi_dm_table_info_spmi[];
+extern struct acpi_dmtable_info acpi_dm_table_info_srat[];
+extern struct acpi_dmtable_info acpi_dm_table_info_srat0[];
+extern struct acpi_dmtable_info acpi_dm_table_info_srat1[];
+extern struct acpi_dmtable_info acpi_dm_table_info_tcpa[];
+extern struct acpi_dmtable_info acpi_dm_table_info_wdrt[];
+
+/*
+ * dmtable
+ */
+void acpi_dm_dump_data_table(struct acpi_table_header *table);
+
+void
+acpi_dm_dump_table(u32 table_length,
+ u32 table_offset,
+ void *table,
+ u32 sub_table_length, struct acpi_dmtable_info *info);
+
+void acpi_dm_line_header(u32 offset, u32 byte_length, char *name);
+
+void acpi_dm_line_header2(u32 offset, u32 byte_length, char *name, u32 value);
+
+/*
+ * dmtbdump
+ */
+void acpi_dm_dump_asf(struct acpi_table_header *table);
+
+void acpi_dm_dump_cpep(struct acpi_table_header *table);
+
+void acpi_dm_dump_fadt(struct acpi_table_header *table);
+
+void acpi_dm_dump_srat(struct acpi_table_header *table);
+
+void acpi_dm_dump_mcfg(struct acpi_table_header *table);
+
+void acpi_dm_dump_madt(struct acpi_table_header *table);
+
+u32 acpi_dm_dump_rsdp(struct acpi_table_header *table);
+
+void acpi_dm_dump_rsdt(struct acpi_table_header *table);
+
+void acpi_dm_dump_slit(struct acpi_table_header *table);
+
+void acpi_dm_dump_xsdt(struct acpi_table_header *table);
+
+/*
+ * dmwalk
+ */
+void
+acpi_dm_disassemble(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *origin, u32 num_opcodes);
+
+void
+acpi_dm_walk_parse_tree(union acpi_parse_object *op,
+ asl_walk_callback descending_callback,
+ asl_walk_callback ascending_callback, void *context);
+
+/*
+ * dmopcode
+ */
+void
+acpi_dm_disassemble_one_op(struct acpi_walk_state *walk_state,
+ struct acpi_op_walk_info *info,
+ union acpi_parse_object *op);
+
+void acpi_dm_decode_internal_object(union acpi_operand_object *obj_desc);
+
+u32 acpi_dm_list_type(union acpi_parse_object *op);
+
+void acpi_dm_method_flags(union acpi_parse_object *op);
+
+void acpi_dm_field_flags(union acpi_parse_object *op);
+
+void acpi_dm_address_space(u8 space_id);
+
+void acpi_dm_region_flags(union acpi_parse_object *op);
+
+void acpi_dm_match_op(union acpi_parse_object *op);
+
+u8 acpi_dm_comma_if_list_member(union acpi_parse_object *op);
+
+void acpi_dm_comma_if_field_member(union acpi_parse_object *op);
+
+/*
+ * dmnames
+ */
+u32 acpi_dm_dump_name(char *name);
+
+acpi_status
+acpi_ps_display_object_pathname(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op);
+
+void acpi_dm_namestring(char *name);
+
+/*
+ * dmobject
+ */
+void
+acpi_dm_display_internal_object(union acpi_operand_object *obj_desc,
+ struct acpi_walk_state *walk_state);
+
+void acpi_dm_display_arguments(struct acpi_walk_state *walk_state);
+
+void acpi_dm_display_locals(struct acpi_walk_state *walk_state);
+
+void
+acpi_dm_dump_method_info(acpi_status status,
+ struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op);
+
+/*
+ * dmbuffer
+ */
+void acpi_dm_disasm_byte_list(u32 level, u8 * byte_data, u32 byte_count);
+
+void
+acpi_dm_byte_list(struct acpi_op_walk_info *info, union acpi_parse_object *op);
+
+void acpi_dm_is_eisa_id(union acpi_parse_object *op);
+
+void acpi_dm_eisa_id(u32 encoded_id);
+
+u8 acpi_dm_is_unicode_buffer(union acpi_parse_object *op);
+
+u8 acpi_dm_is_string_buffer(union acpi_parse_object *op);
+
+/*
+ * dmresrc
+ */
+void acpi_dm_dump_integer8(u8 value, char *name);
+
+void acpi_dm_dump_integer16(u16 value, char *name);
+
+void acpi_dm_dump_integer32(u32 value, char *name);
+
+void acpi_dm_dump_integer64(u64 value, char *name);
+
+void
+acpi_dm_resource_template(struct acpi_op_walk_info *info,
+ union acpi_parse_object *op,
+ u8 * byte_data, u32 byte_count);
+
+u8 acpi_dm_is_resource_template(union acpi_parse_object *op);
+
+void acpi_dm_indent(u32 level);
+
+void acpi_dm_bit_list(u16 mask);
+
+void acpi_dm_decode_attribute(u8 attribute);
+
+void acpi_dm_descriptor_name(void);
+
+/*
+ * dmresrcl
+ */
+void
+acpi_dm_word_descriptor(union aml_resource *resource, u32 length, u32 level);
+
+void
+acpi_dm_dword_descriptor(union aml_resource *resource, u32 length, u32 level);
+
+void
+acpi_dm_extended_descriptor(union aml_resource *resource,
+ u32 length, u32 level);
+
+void
+acpi_dm_qword_descriptor(union aml_resource *resource, u32 length, u32 level);
+
+void
+acpi_dm_memory24_descriptor(union aml_resource *resource,
+ u32 length, u32 level);
+
+void
+acpi_dm_memory32_descriptor(union aml_resource *resource,
+ u32 length, u32 level);
+
+void
+acpi_dm_fixed_memory32_descriptor(union aml_resource *resource,
+ u32 length, u32 level);
+
+void
+acpi_dm_generic_register_descriptor(union aml_resource *resource,
+ u32 length, u32 level);
+
+void
+acpi_dm_interrupt_descriptor(union aml_resource *resource,
+ u32 length, u32 level);
+
+void
+acpi_dm_vendor_large_descriptor(union aml_resource *resource,
+ u32 length, u32 level);
+
+void acpi_dm_vendor_common(char *name, u8 * byte_data, u32 length, u32 level);
+
+/*
+ * dmresrcs
+ */
+void
+acpi_dm_irq_descriptor(union aml_resource *resource, u32 length, u32 level);
+
+void
+acpi_dm_dma_descriptor(union aml_resource *resource, u32 length, u32 level);
+
+void acpi_dm_io_descriptor(union aml_resource *resource, u32 length, u32 level);
+
+void
+acpi_dm_fixed_io_descriptor(union aml_resource *resource,
+ u32 length, u32 level);
+
+void
+acpi_dm_start_dependent_descriptor(union aml_resource *resource,
+ u32 length, u32 level);
+
+void
+acpi_dm_end_dependent_descriptor(union aml_resource *resource,
+ u32 length, u32 level);
+
+void
+acpi_dm_vendor_small_descriptor(union aml_resource *resource,
+ u32 length, u32 level);
+
+/*
+ * dmutils
+ */
+void acpi_dm_add_to_external_list(char *path, u8 type, u32 value);
+
+/*
+ * dmrestag
+ */
+void acpi_dm_find_resources(union acpi_parse_object *root);
+
+void
+acpi_dm_check_resource_reference(union acpi_parse_object *op,
+ struct acpi_walk_state *walk_state);
+
+#endif /* __ACDISASM_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acdispat.h - dispatcher (parser to interpreter interface)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef _ACDISPAT_H_
+#define _ACDISPAT_H_
+
+#define NAMEOF_LOCAL_NTE "__L0"
+#define NAMEOF_ARG_NTE "__A0"
+
+/*
+ * dsopcode - support for late evaluation
+ */
+acpi_status
+acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc);
+
+acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *rgn_desc);
+
+acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc);
+
+acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc);
+
+acpi_status
+acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op);
+
+acpi_status
+acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op);
+
+acpi_status
+acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ union acpi_operand_object *obj_desc);
+
+acpi_status acpi_ds_initialize_region(acpi_handle obj_handle);
+
+/*
+ * dsctrl - Parser/Interpreter interface, control stack routines
+ */
+acpi_status
+acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op);
+
+acpi_status
+acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op);
+
+/*
+ * dsexec - Parser/Interpreter interface, method execution callbacks
+ */
+acpi_status
+acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
+ union acpi_operand_object *result_obj);
+
+acpi_status
+acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **out_op);
+
+acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *state);
+
+/*
+ * dsfield - Parser/Interpreter interface for AML fields
+ */
+acpi_status
+acpi_ds_create_field(union acpi_parse_object *op,
+ struct acpi_namespace_node *region_node,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ds_create_bank_field(union acpi_parse_object *op,
+ struct acpi_namespace_node *region_node,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ds_create_index_field(union acpi_parse_object *op,
+ struct acpi_namespace_node *region_node,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ds_create_buffer_field(union acpi_parse_object *op,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ds_init_field_objects(union acpi_parse_object *op,
+ struct acpi_walk_state *walk_state);
+
+/*
+ * dsload - Parser/Interpreter interface, namespace load callbacks
+ */
+acpi_status
+acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **out_op);
+
+acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **out_op);
+
+acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number);
+
+/*
+ * dsmthdat - method data (locals/args)
+ */
+acpi_status
+acpi_ds_store_object_to_local(u16 opcode,
+ u32 index,
+ union acpi_operand_object *src_desc,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ds_method_data_get_entry(u16 opcode,
+ u32 index,
+ struct acpi_walk_state *walk_state,
+ union acpi_operand_object ***node);
+
+void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state);
+
+u8 acpi_ds_is_method_value(union acpi_operand_object *obj_desc);
+
+acpi_status
+acpi_ds_method_data_get_value(u16 opcode,
+ u32 index,
+ struct acpi_walk_state *walk_state,
+ union acpi_operand_object **dest_desc);
+
+acpi_status
+acpi_ds_method_data_init_args(union acpi_operand_object **params,
+ u32 max_param_count,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ds_method_data_get_node(u16 opcode,
+ u32 index,
+ struct acpi_walk_state *walk_state,
+ struct acpi_namespace_node **node);
+
+void acpi_ds_method_data_init(struct acpi_walk_state *walk_state);
+
+/*
+ * dsmethod - Parser/Interpreter interface - control method parsing
+ */
+acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node);
+
+acpi_status
+acpi_ds_call_control_method(struct acpi_thread_state *thread,
+ struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op);
+
+acpi_status
+acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
+ union acpi_operand_object *return_desc);
+
+void
+acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
+ union acpi_operand_object *obj_desc,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state);
+
+/*
+ * dsinit
+ */
+acpi_status
+acpi_ds_initialize_objects(struct acpi_table_desc *table_desc,
+ struct acpi_namespace_node *start_node);
+
+/*
+ * dsobject - Parser/Interpreter interface - object initialization and conversion
+ */
+acpi_status
+acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ u32 buffer_length,
+ union acpi_operand_object **obj_desc_ptr);
+
+acpi_status
+acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ u32 package_length,
+ union acpi_operand_object **obj_desc);
+
+acpi_status
+acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ u16 opcode, union acpi_operand_object **obj_desc);
+
+acpi_status
+acpi_ds_create_node(struct acpi_walk_state *walk_state,
+ struct acpi_namespace_node *node,
+ union acpi_parse_object *op);
+
+/*
+ * dsutils - Parser/Interpreter interface utility routines
+ */
+void acpi_ds_clear_implicit_return(struct acpi_walk_state *walk_state);
+
+u8
+acpi_ds_do_implicit_return(union acpi_operand_object *return_desc,
+ struct acpi_walk_state *walk_state,
+ u8 add_reference);
+
+u8
+acpi_ds_is_result_used(union acpi_parse_object *op,
+ struct acpi_walk_state *walk_state);
+
+void
+acpi_ds_delete_result_if_not_used(union acpi_parse_object *op,
+ union acpi_operand_object *result_obj,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ds_create_operand(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *arg, u32 args_remaining);
+
+acpi_status
+acpi_ds_create_operands(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *first_arg);
+
+acpi_status acpi_ds_resolve_operands(struct acpi_walk_state *walk_state);
+
+void acpi_ds_clear_operands(struct acpi_walk_state *walk_state);
+
+/*
+ * dswscope - Scope Stack manipulation
+ */
+acpi_status
+acpi_ds_scope_stack_push(struct acpi_namespace_node *node,
+ acpi_object_type type,
+ struct acpi_walk_state *walk_state);
+
+acpi_status acpi_ds_scope_stack_pop(struct acpi_walk_state *walk_state);
+
+void acpi_ds_scope_stack_clear(struct acpi_walk_state *walk_state);
+
+/*
+ * dswstate - parser WALK_STATE management routines
+ */
+acpi_status
+acpi_ds_obj_stack_push(void *object, struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state *walk_state);
+
+struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id,
+ union acpi_parse_object
+ *origin,
+ union acpi_operand_object
+ *mth_desc,
+ struct acpi_thread_state
+ *thread);
+
+acpi_status
+acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ struct acpi_namespace_node *method_node,
+ u8 * aml_start,
+ u32 aml_length,
+ struct acpi_evaluate_info *info, u8 pass_number);
+
+acpi_status
+acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
+ struct acpi_walk_state *walk_state);
+
+void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state);
+
+struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state
+ *thread);
+
+void
+acpi_ds_push_walk_state(struct acpi_walk_state *walk_state,
+ struct acpi_thread_state *thread);
+
+acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *walk_state);
+
+acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *walk_state);
+
+acpi_status acpi_ds_result_stack_clear(struct acpi_walk_state *walk_state);
+
+struct acpi_walk_state *acpi_ds_get_current_walk_state(struct acpi_thread_state
+ *thread);
+
+#ifdef ACPI_FUTURE_USAGE
+acpi_status
+acpi_ds_result_remove(union acpi_operand_object **object,
+ u32 index, struct acpi_walk_state *walk_state);
+#endif
+
+acpi_status
+acpi_ds_result_pop(union acpi_operand_object **object,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ds_result_push(union acpi_operand_object *object,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ds_result_pop_from_bottom(union acpi_operand_object **object,
+ struct acpi_walk_state *walk_state);
+
+#endif /* _ACDISPAT_H_ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acevents.h - Event subcomponent prototypes and defines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACEVENTS_H__
+#define __ACEVENTS_H__
+
+/*
+ * evevent
+ */
+acpi_status acpi_ev_initialize_events(void);
+
+acpi_status acpi_ev_install_xrupt_handlers(void);
+
+acpi_status acpi_ev_install_fadt_gpes(void);
+
+u32 acpi_ev_fixed_event_detect(void);
+
+/*
+ * evmisc
+ */
+u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node);
+
+acpi_status acpi_ev_acquire_global_lock(u16 timeout);
+
+acpi_status acpi_ev_release_global_lock(void);
+
+acpi_status acpi_ev_init_global_lock_handler(void);
+
+u32 acpi_ev_get_gpe_number_index(u32 gpe_number);
+
+acpi_status
+acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
+ u32 notify_value);
+
+/*
+ * evgpe - GPE handling and dispatch
+ */
+acpi_status
+acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
+ u8 type);
+
+acpi_status
+acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
+ u8 write_to_hardware);
+
+acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
+
+struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
+ u32 gpe_number);
+
+/*
+ * evgpeblk
+ */
+u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
+
+acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback);
+
+acpi_status
+acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block);
+
+acpi_status
+acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
+ struct acpi_generic_address *gpe_block_address,
+ u32 register_count,
+ u8 gpe_block_base_number,
+ u32 interrupt_number,
+ struct acpi_gpe_block_info **return_gpe_block);
+
+acpi_status
+acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
+ struct acpi_gpe_block_info *gpe_block);
+
+acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block);
+
+u32
+acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info,
+ u32 gpe_number);
+
+u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
+
+acpi_status
+acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type);
+
+acpi_status
+acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info);
+
+acpi_status acpi_ev_gpe_initialize(void);
+
+/*
+ * evregion - Address Space handling
+ */
+acpi_status acpi_ev_install_region_handlers(void);
+
+acpi_status acpi_ev_initialize_op_regions(void);
+
+acpi_status
+acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ u32 function,
+ acpi_physical_address address,
+ u32 bit_width, acpi_integer * value);
+
+acpi_status
+acpi_ev_attach_region(union acpi_operand_object *handler_obj,
+ union acpi_operand_object *region_obj,
+ u8 acpi_ns_is_locked);
+
+void
+acpi_ev_detach_region(union acpi_operand_object *region_obj,
+ u8 acpi_ns_is_locked);
+
+acpi_status
+acpi_ev_install_space_handler(struct acpi_namespace_node *node,
+ acpi_adr_space_type space_id,
+ acpi_adr_space_handler handler,
+ acpi_adr_space_setup setup, void *context);
+
+acpi_status
+acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
+ acpi_adr_space_type space_id);
+
+acpi_status
+acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function);
+
+/*
+ * evregini - Region initialization and setup
+ */
+acpi_status
+acpi_ev_system_memory_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context,
+ void **region_context);
+
+acpi_status
+acpi_ev_io_space_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context);
+
+acpi_status
+acpi_ev_pci_config_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context);
+
+acpi_status
+acpi_ev_cmos_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context);
+
+acpi_status
+acpi_ev_pci_bar_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context);
+
+acpi_status
+acpi_ev_default_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context);
+
+acpi_status
+acpi_ev_initialize_region(union acpi_operand_object *region_obj,
+ u8 acpi_ns_locked);
+
+/*
+ * evsci - SCI (System Control Interrupt) handling/dispatch
+ */
+u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context);
+
+u32 acpi_ev_install_sci_handler(void);
+
+acpi_status acpi_ev_remove_sci_handler(void);
+
+u32 acpi_ev_initialize_sCI(u32 program_sCI);
+
+void acpi_ev_terminate(void);
+
+#endif /* __ACEVENTS_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acexcep.h - Exception codes returned by the ACPI subsystem
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACEXCEP_H__
+#define __ACEXCEP_H__
+
+/*
+ * Exceptions returned by external ACPI interfaces
+ */
+#define AE_CODE_ENVIRONMENTAL 0x0000
+#define AE_CODE_PROGRAMMER 0x1000
+#define AE_CODE_ACPI_TABLES 0x2000
+#define AE_CODE_AML 0x3000
+#define AE_CODE_CONTROL 0x4000
+#define AE_CODE_MASK 0xF000
+
+#define ACPI_SUCCESS(a) (!(a))
+#define ACPI_FAILURE(a) (a)
+
+#define AE_OK (acpi_status) 0x0000
+
+/*
+ * Environmental exceptions
+ */
+#define AE_ERROR (acpi_status) (0x0001 | AE_CODE_ENVIRONMENTAL)
+#define AE_NO_ACPI_TABLES (acpi_status) (0x0002 | AE_CODE_ENVIRONMENTAL)
+#define AE_NO_NAMESPACE (acpi_status) (0x0003 | AE_CODE_ENVIRONMENTAL)
+#define AE_NO_MEMORY (acpi_status) (0x0004 | AE_CODE_ENVIRONMENTAL)
+#define AE_NOT_FOUND (acpi_status) (0x0005 | AE_CODE_ENVIRONMENTAL)
+#define AE_NOT_EXIST (acpi_status) (0x0006 | AE_CODE_ENVIRONMENTAL)
+#define AE_ALREADY_EXISTS (acpi_status) (0x0007 | AE_CODE_ENVIRONMENTAL)
+#define AE_TYPE (acpi_status) (0x0008 | AE_CODE_ENVIRONMENTAL)
+#define AE_NULL_OBJECT (acpi_status) (0x0009 | AE_CODE_ENVIRONMENTAL)
+#define AE_NULL_ENTRY (acpi_status) (0x000A | AE_CODE_ENVIRONMENTAL)
+#define AE_BUFFER_OVERFLOW (acpi_status) (0x000B | AE_CODE_ENVIRONMENTAL)
+#define AE_STACK_OVERFLOW (acpi_status) (0x000C | AE_CODE_ENVIRONMENTAL)
+#define AE_STACK_UNDERFLOW (acpi_status) (0x000D | AE_CODE_ENVIRONMENTAL)
+#define AE_NOT_IMPLEMENTED (acpi_status) (0x000E | AE_CODE_ENVIRONMENTAL)
+#define AE_VERSION_MISMATCH (acpi_status) (0x000F | AE_CODE_ENVIRONMENTAL)
+#define AE_SUPPORT (acpi_status) (0x0010 | AE_CODE_ENVIRONMENTAL)
+#define AE_SHARE (acpi_status) (0x0011 | AE_CODE_ENVIRONMENTAL)
+#define AE_LIMIT (acpi_status) (0x0012 | AE_CODE_ENVIRONMENTAL)
+#define AE_TIME (acpi_status) (0x0013 | AE_CODE_ENVIRONMENTAL)
+#define AE_UNKNOWN_STATUS (acpi_status) (0x0014 | AE_CODE_ENVIRONMENTAL)
+#define AE_ACQUIRE_DEADLOCK (acpi_status) (0x0015 | AE_CODE_ENVIRONMENTAL)
+#define AE_RELEASE_DEADLOCK (acpi_status) (0x0016 | AE_CODE_ENVIRONMENTAL)
+#define AE_NOT_ACQUIRED (acpi_status) (0x0017 | AE_CODE_ENVIRONMENTAL)
+#define AE_ALREADY_ACQUIRED (acpi_status) (0x0018 | AE_CODE_ENVIRONMENTAL)
+#define AE_NO_HARDWARE_RESPONSE (acpi_status) (0x0019 | AE_CODE_ENVIRONMENTAL)
+#define AE_NO_GLOBAL_LOCK (acpi_status) (0x001A | AE_CODE_ENVIRONMENTAL)
+#define AE_LOGICAL_ADDRESS (acpi_status) (0x001B | AE_CODE_ENVIRONMENTAL)
+#define AE_ABORT_METHOD (acpi_status) (0x001C | AE_CODE_ENVIRONMENTAL)
+#define AE_SAME_HANDLER (acpi_status) (0x001D | AE_CODE_ENVIRONMENTAL)
+#define AE_WAKE_ONLY_GPE (acpi_status) (0x001E | AE_CODE_ENVIRONMENTAL)
+#define AE_OWNER_ID_LIMIT (acpi_status) (0x001F | AE_CODE_ENVIRONMENTAL)
+
+#define AE_CODE_ENV_MAX 0x001F
+
+/*
+ * Programmer exceptions
+ */
+#define AE_BAD_PARAMETER (acpi_status) (0x0001 | AE_CODE_PROGRAMMER)
+#define AE_BAD_CHARACTER (acpi_status) (0x0002 | AE_CODE_PROGRAMMER)
+#define AE_BAD_PATHNAME (acpi_status) (0x0003 | AE_CODE_PROGRAMMER)
+#define AE_BAD_DATA (acpi_status) (0x0004 | AE_CODE_PROGRAMMER)
+#define AE_BAD_ADDRESS (acpi_status) (0x0005 | AE_CODE_PROGRAMMER)
+#define AE_ALIGNMENT (acpi_status) (0x0006 | AE_CODE_PROGRAMMER)
+#define AE_BAD_HEX_CONSTANT (acpi_status) (0x0007 | AE_CODE_PROGRAMMER)
+#define AE_BAD_OCTAL_CONSTANT (acpi_status) (0x0008 | AE_CODE_PROGRAMMER)
+#define AE_BAD_DECIMAL_CONSTANT (acpi_status) (0x0009 | AE_CODE_PROGRAMMER)
+
+#define AE_CODE_PGM_MAX 0x0009
+
+/*
+ * ACPI table exceptions
+ */
+#define AE_BAD_SIGNATURE (acpi_status) (0x0001 | AE_CODE_ACPI_TABLES)
+#define AE_BAD_HEADER (acpi_status) (0x0002 | AE_CODE_ACPI_TABLES)
+#define AE_BAD_CHECKSUM (acpi_status) (0x0003 | AE_CODE_ACPI_TABLES)
+#define AE_BAD_VALUE (acpi_status) (0x0004 | AE_CODE_ACPI_TABLES)
+#define AE_TABLE_NOT_SUPPORTED (acpi_status) (0x0005 | AE_CODE_ACPI_TABLES)
+#define AE_INVALID_TABLE_LENGTH (acpi_status) (0x0006 | AE_CODE_ACPI_TABLES)
+
+#define AE_CODE_TBL_MAX 0x0006
+
+/*
+ * AML exceptions. These are caused by problems with
+ * the actual AML byte stream
+ */
+#define AE_AML_ERROR (acpi_status) (0x0001 | AE_CODE_AML)
+#define AE_AML_PARSE (acpi_status) (0x0002 | AE_CODE_AML)
+#define AE_AML_BAD_OPCODE (acpi_status) (0x0003 | AE_CODE_AML)
+#define AE_AML_NO_OPERAND (acpi_status) (0x0004 | AE_CODE_AML)
+#define AE_AML_OPERAND_TYPE (acpi_status) (0x0005 | AE_CODE_AML)
+#define AE_AML_OPERAND_VALUE (acpi_status) (0x0006 | AE_CODE_AML)
+#define AE_AML_UNINITIALIZED_LOCAL (acpi_status) (0x0007 | AE_CODE_AML)
+#define AE_AML_UNINITIALIZED_ARG (acpi_status) (0x0008 | AE_CODE_AML)
+#define AE_AML_UNINITIALIZED_ELEMENT (acpi_status) (0x0009 | AE_CODE_AML)
+#define AE_AML_NUMERIC_OVERFLOW (acpi_status) (0x000A | AE_CODE_AML)
+#define AE_AML_REGION_LIMIT (acpi_status) (0x000B | AE_CODE_AML)
+#define AE_AML_BUFFER_LIMIT (acpi_status) (0x000C | AE_CODE_AML)
+#define AE_AML_PACKAGE_LIMIT (acpi_status) (0x000D | AE_CODE_AML)
+#define AE_AML_DIVIDE_BY_ZERO (acpi_status) (0x000E | AE_CODE_AML)
+#define AE_AML_BAD_NAME (acpi_status) (0x000F | AE_CODE_AML)
+#define AE_AML_NAME_NOT_FOUND (acpi_status) (0x0010 | AE_CODE_AML)
+#define AE_AML_INTERNAL (acpi_status) (0x0011 | AE_CODE_AML)
+#define AE_AML_INVALID_SPACE_ID (acpi_status) (0x0012 | AE_CODE_AML)
+#define AE_AML_STRING_LIMIT (acpi_status) (0x0013 | AE_CODE_AML)
+#define AE_AML_NO_RETURN_VALUE (acpi_status) (0x0014 | AE_CODE_AML)
+#define AE_AML_METHOD_LIMIT (acpi_status) (0x0015 | AE_CODE_AML)
+#define AE_AML_NOT_OWNER (acpi_status) (0x0016 | AE_CODE_AML)
+#define AE_AML_MUTEX_ORDER (acpi_status) (0x0017 | AE_CODE_AML)
+#define AE_AML_MUTEX_NOT_ACQUIRED (acpi_status) (0x0018 | AE_CODE_AML)
+#define AE_AML_INVALID_RESOURCE_TYPE (acpi_status) (0x0019 | AE_CODE_AML)
+#define AE_AML_INVALID_INDEX (acpi_status) (0x001A | AE_CODE_AML)
+#define AE_AML_REGISTER_LIMIT (acpi_status) (0x001B | AE_CODE_AML)
+#define AE_AML_NO_WHILE (acpi_status) (0x001C | AE_CODE_AML)
+#define AE_AML_ALIGNMENT (acpi_status) (0x001D | AE_CODE_AML)
+#define AE_AML_NO_RESOURCE_END_TAG (acpi_status) (0x001E | AE_CODE_AML)
+#define AE_AML_BAD_RESOURCE_VALUE (acpi_status) (0x001F | AE_CODE_AML)
+#define AE_AML_CIRCULAR_REFERENCE (acpi_status) (0x0020 | AE_CODE_AML)
+#define AE_AML_BAD_RESOURCE_LENGTH (acpi_status) (0x0021 | AE_CODE_AML)
+#define AE_AML_ILLEGAL_ADDRESS (acpi_status) (0x0022 | AE_CODE_AML)
+
+#define AE_CODE_AML_MAX 0x0022
+
+/*
+ * Internal exceptions used for control
+ */
+#define AE_CTRL_RETURN_VALUE (acpi_status) (0x0001 | AE_CODE_CONTROL)
+#define AE_CTRL_PENDING (acpi_status) (0x0002 | AE_CODE_CONTROL)
+#define AE_CTRL_TERMINATE (acpi_status) (0x0003 | AE_CODE_CONTROL)
+#define AE_CTRL_TRUE (acpi_status) (0x0004 | AE_CODE_CONTROL)
+#define AE_CTRL_FALSE (acpi_status) (0x0005 | AE_CODE_CONTROL)
+#define AE_CTRL_DEPTH (acpi_status) (0x0006 | AE_CODE_CONTROL)
+#define AE_CTRL_END (acpi_status) (0x0007 | AE_CODE_CONTROL)
+#define AE_CTRL_TRANSFER (acpi_status) (0x0008 | AE_CODE_CONTROL)
+#define AE_CTRL_BREAK (acpi_status) (0x0009 | AE_CODE_CONTROL)
+#define AE_CTRL_CONTINUE (acpi_status) (0x000A | AE_CODE_CONTROL)
+#define AE_CTRL_SKIP (acpi_status) (0x000B | AE_CODE_CONTROL)
+
+#define AE_CODE_CTRL_MAX 0x000B
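+
+/*
+ * Example of how a status value decodes, using only the definitions above:
+ *
+ *   AE_NOT_FOUND          = 0x0005 | AE_CODE_ENVIRONMENTAL = 0x0005
+ *   AE_AML_DIVIDE_BY_ZERO = 0x000E | AE_CODE_AML           = 0x300E
+ *
+ * The exception class is recovered with AE_CODE_MASK, the index comes from
+ * the low bits, and success is simply a zero status:
+ *
+ *   acpi_status status = AE_AML_DIVIDE_BY_ZERO;
+ *
+ *   if (ACPI_FAILURE(status) && (status & AE_CODE_MASK) == AE_CODE_AML) {
+ *       u32 index = status & ~AE_CODE_MASK;    (yields 0x000E)
+ *   }
+ *
+ * ACPI_SUCCESS(AE_OK) evaluates to TRUE because AE_OK is the only zero
+ * status value.
+ */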
+
+#ifdef DEFINE_ACPI_GLOBALS
+
+/*
+ * String versions of the exception codes above
+ * These strings must match the corresponding defines exactly
+ */
+char const *acpi_gbl_exception_names_env[] = {
+ "AE_OK",
+ "AE_ERROR",
+ "AE_NO_ACPI_TABLES",
+ "AE_NO_NAMESPACE",
+ "AE_NO_MEMORY",
+ "AE_NOT_FOUND",
+ "AE_NOT_EXIST",
+ "AE_ALREADY_EXISTS",
+ "AE_TYPE",
+ "AE_NULL_OBJECT",
+ "AE_NULL_ENTRY",
+ "AE_BUFFER_OVERFLOW",
+ "AE_STACK_OVERFLOW",
+ "AE_STACK_UNDERFLOW",
+ "AE_NOT_IMPLEMENTED",
+ "AE_VERSION_MISMATCH",
+ "AE_SUPPORT",
+ "AE_SHARE",
+ "AE_LIMIT",
+ "AE_TIME",
+ "AE_UNKNOWN_STATUS",
+ "AE_ACQUIRE_DEADLOCK",
+ "AE_RELEASE_DEADLOCK",
+ "AE_NOT_ACQUIRED",
+ "AE_ALREADY_ACQUIRED",
+ "AE_NO_HARDWARE_RESPONSE",
+ "AE_NO_GLOBAL_LOCK",
+ "AE_LOGICAL_ADDRESS",
+ "AE_ABORT_METHOD",
+ "AE_SAME_HANDLER",
+ "AE_WAKE_ONLY_GPE",
+ "AE_OWNER_ID_LIMIT"
+};
+
+char const *acpi_gbl_exception_names_pgm[] = {
+ "AE_BAD_PARAMETER",
+ "AE_BAD_CHARACTER",
+ "AE_BAD_PATHNAME",
+ "AE_BAD_DATA",
+ "AE_BAD_ADDRESS",
+ "AE_ALIGNMENT",
+ "AE_BAD_HEX_CONSTANT",
+ "AE_BAD_OCTAL_CONSTANT",
+ "AE_BAD_DECIMAL_CONSTANT"
+};
+
+char const *acpi_gbl_exception_names_tbl[] = {
+ "AE_BAD_SIGNATURE",
+ "AE_BAD_HEADER",
+ "AE_BAD_CHECKSUM",
+ "AE_BAD_VALUE",
+ "AE_TABLE_NOT_SUPPORTED",
+ "AE_INVALID_TABLE_LENGTH"
+};
+
+char const *acpi_gbl_exception_names_aml[] = {
+ "AE_AML_ERROR",
+ "AE_AML_PARSE",
+ "AE_AML_BAD_OPCODE",
+ "AE_AML_NO_OPERAND",
+ "AE_AML_OPERAND_TYPE",
+ "AE_AML_OPERAND_VALUE",
+ "AE_AML_UNINITIALIZED_LOCAL",
+ "AE_AML_UNINITIALIZED_ARG",
+ "AE_AML_UNINITIALIZED_ELEMENT",
+ "AE_AML_NUMERIC_OVERFLOW",
+ "AE_AML_REGION_LIMIT",
+ "AE_AML_BUFFER_LIMIT",
+ "AE_AML_PACKAGE_LIMIT",
+ "AE_AML_DIVIDE_BY_ZERO",
+ "AE_AML_BAD_NAME",
+ "AE_AML_NAME_NOT_FOUND",
+ "AE_AML_INTERNAL",
+ "AE_AML_INVALID_SPACE_ID",
+ "AE_AML_STRING_LIMIT",
+ "AE_AML_NO_RETURN_VALUE",
+ "AE_AML_METHOD_LIMIT",
+ "AE_AML_NOT_OWNER",
+ "AE_AML_MUTEX_ORDER",
+ "AE_AML_MUTEX_NOT_ACQUIRED",
+ "AE_AML_INVALID_RESOURCE_TYPE",
+ "AE_AML_INVALID_INDEX",
+ "AE_AML_REGISTER_LIMIT",
+ "AE_AML_NO_WHILE",
+ "AE_AML_ALIGNMENT",
+ "AE_AML_NO_RESOURCE_END_TAG",
+ "AE_AML_BAD_RESOURCE_VALUE",
+ "AE_AML_CIRCULAR_REFERENCE",
+ "AE_AML_BAD_RESOURCE_LENGTH",
+ "AE_AML_ILLEGAL_ADDRESS"
+};
+
+char const *acpi_gbl_exception_names_ctrl[] = {
+ "AE_CTRL_RETURN_VALUE",
+ "AE_CTRL_PENDING",
+ "AE_CTRL_TERMINATE",
+ "AE_CTRL_TRUE",
+ "AE_CTRL_FALSE",
+ "AE_CTRL_DEPTH",
+ "AE_CTRL_END",
+ "AE_CTRL_TRANSFER",
+ "AE_CTRL_BREAK",
+ "AE_CTRL_CONTINUE",
+ "AE_CTRL_SKIP"
+};
+
+#endif /* DEFINE_ACPI_GLOBALS */
+
+#endif /* __ACEXCEP_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acglobal.h - Declarations for global variables
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACGLOBAL_H__
+#define __ACGLOBAL_H__
+
+/*
+ * Ensure that the globals are actually defined and initialized only once.
+ *
+ * The use of these macros allows a single list of globals to be kept here,
+ * which simplifies maintenance of the code.
+ */
+#ifdef DEFINE_ACPI_GLOBALS
+#define ACPI_EXTERN
+#define ACPI_INIT_GLOBAL(a,b) a=b
+#else
+#define ACPI_EXTERN extern
+#define ACPI_INIT_GLOBAL(a,b) a
+#endif
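+
+/*
+ * For example, a later declaration in this file,
+ *
+ *   ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_create_osi_method, TRUE);
+ *
+ * expands to an initialized definition in the one translation unit that
+ * defines DEFINE_ACPI_GLOBALS:
+ *
+ *   u8 acpi_gbl_create_osi_method=TRUE;
+ *
+ * and to a plain declaration everywhere else:
+ *
+ *   extern u8 acpi_gbl_create_osi_method;
+ */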
+
+/*
+ * Keep local copies of these FADT-based registers. NOTE: These globals
+ * are first in this file for alignment reasons on 64-bit systems.
+ */
+ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
+ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;
+
+/*****************************************************************************
+ *
+ * Debug support
+ *
+ ****************************************************************************/
+
+/* Runtime configuration of debug print levels */
+
+extern u32 acpi_dbg_level;
+extern u32 acpi_dbg_layer;
+
+/* Procedure nesting level for debug output */
+
+extern u32 acpi_gbl_nesting_level;
+
+/* Support for dynamic control method tracing mechanism */
+
+ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
+ACPI_EXTERN u32 acpi_gbl_original_dbg_layer;
+ACPI_EXTERN acpi_name acpi_gbl_trace_method_name;
+ACPI_EXTERN u32 acpi_gbl_trace_dbg_level;
+ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
+ACPI_EXTERN u32 acpi_gbl_trace_flags;
+
+/*****************************************************************************
+ *
+ * Runtime configuration (static defaults that can be overridden at runtime)
+ *
+ ****************************************************************************/
+
+/*
+ * Enable "slack" in the AML interpreter? Default is FALSE, and the
+ * interpreter strictly follows the ACPI specification. Setting to TRUE
+ * allows the interpreter to ignore certain errors and/or bad AML constructs.
+ *
+ * Currently, these features are enabled by this flag:
+ *
+ * 1) Allow "implicit return" of last value in a control method
+ * 2) Allow access beyond the end of an operation region
+ * 3) Allow access to uninitialized locals/args (auto-init to integer 0)
+ * 4) Allow ANY object type to be a source operand for the Store() operator
+ * 5) Allow unresolved references (invalid target name) in package objects
+ * 6) Enable warning messages for behavior that is not ACPI spec compliant
+ */
+ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE);
+
+/*
+ * Automatically serialize ALL control methods? Default is FALSE, meaning
+ * to use the Serialized/not_serialized method flags on a per method basis.
+ * Only change this if the ASL code is poorly written and cannot handle
+ * reentrancy even though methods are marked "NotSerialized".
+ */
+ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_all_methods_serialized, FALSE);
+
+/*
+ * Create the predefined _OSI method in the namespace? Default is TRUE
+ * because ACPI CA is fully compatible with other ACPI implementations.
+ * Changing this will revert ACPI CA (and machine ASL) to pre-OSI behavior.
+ */
+ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_create_osi_method, TRUE);
+
+/*
+ * Disable wakeup GPEs during runtime? Default is TRUE because WAKE and
+ * RUNTIME GPEs should never be shared, and WAKE GPEs should typically only
+ * be enabled just before going to sleep.
+ */
+ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE);
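+
+/*
+ * These options are ordinary globals, so a host that needs non-default
+ * behavior can simply assign them before the subsystem is brought up, for
+ * example:
+ *
+ *   acpi_gbl_enable_interpreter_slack = TRUE;
+ *
+ * Changing them after control methods have begun executing is not expected
+ * to be reliable.
+ */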
+
+/*****************************************************************************
+ *
+ * ACPI Table globals
+ *
+ ****************************************************************************/
+
+/*
+ * Table pointers.
+ * Although these pointers are somewhat redundant with the global acpi_table,
+ * they are convenient because they are typed pointers.
+ *
+ * These tables are single-instance, meaning that there can be at most one
+ * of each in the system. Each global points to the actual table.
+ */
+ACPI_EXTERN u32 acpi_gbl_table_flags;
+ACPI_EXTERN u32 acpi_gbl_rsdt_table_count;
+ACPI_EXTERN struct rsdp_descriptor *acpi_gbl_RSDP;
+ACPI_EXTERN struct xsdt_descriptor *acpi_gbl_XSDT;
+ACPI_EXTERN struct fadt_descriptor *acpi_gbl_FADT;
+ACPI_EXTERN struct acpi_table_header *acpi_gbl_DSDT;
+ACPI_EXTERN struct facs_descriptor *acpi_gbl_FACS;
+ACPI_EXTERN struct acpi_common_facs acpi_gbl_common_fACS;
+/*
+ * Since there may be multiple SSDTs and PSDTs, a single pointer is not
+ * sufficient; therefore, there isn't one.
+ */
+
+/* The root table can be either an RSDT or an XSDT */
+
+ACPI_EXTERN u8 acpi_gbl_root_table_type;
+#define ACPI_TABLE_TYPE_RSDT 'R'
+#define ACPI_TABLE_TYPE_XSDT 'X'
+
+/*
+ * Handle both ACPI 1.0 and ACPI 2.0 Integer widths:
+ * If we are executing a method that exists in a 32-bit ACPI table,
+ * use only the lower 32 bits of the (internal) 64-bit Integer.
+ */
+ACPI_EXTERN u8 acpi_gbl_integer_bit_width;
+ACPI_EXTERN u8 acpi_gbl_integer_byte_width;
+ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
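+
+/*
+ * For a method running from a 32-bit (ACPI 1.0) table these are expected
+ * to hold 32, 4, and 8 respectively; for a 64-bit table, 64, 8, and 16.
+ */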
+
+/*
+ * ACPI Table info arrays
+ */
+extern struct acpi_table_list acpi_gbl_table_lists[ACPI_TABLE_ID_MAX + 1];
+extern struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1];
+
+/*****************************************************************************
+ *
+ * Mutual exclusion within the ACPICA subsystem
+ *
+ ****************************************************************************/
+
+/*
+ * Predefined mutex objects. This array contains the
+ * actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs.
+ * (The table maps local handles to the real OS handles)
+ */
+ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
+
+/*
+ * Global lock semaphore works in conjunction with the actual HW global lock
+ */
+ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore;
+
+/*
+ * Spinlocks are used for interfaces that can be possibly called at
+ * interrupt level
+ */
+ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock; /* For GPE data structs and registers */
+ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
+#define acpi_gbl_gpe_lock &_acpi_gbl_gpe_lock
+#define acpi_gbl_hardware_lock &_acpi_gbl_hardware_lock
+
+/*****************************************************************************
+ *
+ * Miscellaneous globals
+ *
+ ****************************************************************************/
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+
+/* Lists for tracking memory allocations */
+
+ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list;
+ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
+#endif
+
+/* Object caches */
+
+ACPI_EXTERN acpi_cache_t *acpi_gbl_namespace_cache;
+ACPI_EXTERN acpi_cache_t *acpi_gbl_state_cache;
+ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_cache;
+ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_ext_cache;
+ACPI_EXTERN acpi_cache_t *acpi_gbl_operand_cache;
+
+/* Global handlers */
+
+ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_device_notify;
+ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_system_notify;
+ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler;
+ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
+ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
+
+/* Misc */
+
+ACPI_EXTERN u32 acpi_gbl_global_lock_thread_count;
+ACPI_EXTERN u32 acpi_gbl_original_mode;
+ACPI_EXTERN u32 acpi_gbl_rsdp_original_location;
+ACPI_EXTERN u32 acpi_gbl_ns_lookup_count;
+ACPI_EXTERN u32 acpi_gbl_ps_find_count;
+ACPI_EXTERN u32 acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS];
+ACPI_EXTERN u16 acpi_gbl_pm1_enable_register_save;
+ACPI_EXTERN u16 acpi_gbl_global_lock_handle;
+ACPI_EXTERN u8 acpi_gbl_last_owner_id_index;
+ACPI_EXTERN u8 acpi_gbl_next_owner_id_offset;
+ACPI_EXTERN u8 acpi_gbl_debugger_configuration;
+ACPI_EXTERN u8 acpi_gbl_global_lock_acquired;
+ACPI_EXTERN u8 acpi_gbl_step_to_next_call;
+ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
+ACPI_EXTERN u8 acpi_gbl_global_lock_present;
+ACPI_EXTERN u8 acpi_gbl_events_initialized;
+ACPI_EXTERN u8 acpi_gbl_system_awake_and_running;
+
+extern u8 acpi_gbl_shutdown;
+extern u32 acpi_gbl_startup_flags;
+extern const u8 acpi_gbl_decode_to8bit[8];
+extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT];
+extern const char *acpi_gbl_highest_dstate_names[4];
+extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
+extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
+
+/*****************************************************************************
+ *
+ * Namespace globals
+ *
+ ****************************************************************************/
+
+#define NUM_NS_TYPES ACPI_TYPE_INVALID+1
+
+#if !defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY)
+#define NUM_PREDEFINED_NAMES 10
+#else
+#define NUM_PREDEFINED_NAMES 9
+#endif
+
+ACPI_EXTERN struct acpi_namespace_node acpi_gbl_root_node_struct;
+ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_root_node;
+ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_fadt_gpe_device;
+
+extern const u8 acpi_gbl_ns_properties[NUM_NS_TYPES];
+extern const struct acpi_predefined_names
+ acpi_gbl_pre_defined_names[NUM_PREDEFINED_NAMES];
+
+#ifdef ACPI_DEBUG_OUTPUT
+ACPI_EXTERN u32 acpi_gbl_current_node_count;
+ACPI_EXTERN u32 acpi_gbl_current_node_size;
+ACPI_EXTERN u32 acpi_gbl_max_concurrent_node_count;
+ACPI_EXTERN acpi_size acpi_gbl_entry_stack_pointer;
+ACPI_EXTERN acpi_size acpi_gbl_lowest_stack_pointer;
+ACPI_EXTERN u32 acpi_gbl_deepest_nesting;
+#endif
+
+/*****************************************************************************
+ *
+ * Interpreter globals
+ *
+ ****************************************************************************/
+
+ACPI_EXTERN struct acpi_thread_state *acpi_gbl_current_walk_list;
+
+/* Control method single step flag */
+
+ACPI_EXTERN u8 acpi_gbl_cm_single_step;
+
+/*****************************************************************************
+ *
+ * Hardware globals
+ *
+ ****************************************************************************/
+
+extern struct acpi_bit_register_info
+ acpi_gbl_bit_register_info[ACPI_NUM_BITREG];
+ACPI_EXTERN u8 acpi_gbl_sleep_type_a;
+ACPI_EXTERN u8 acpi_gbl_sleep_type_b;
+
+/*****************************************************************************
+ *
+ * Event and GPE globals
+ *
+ ****************************************************************************/
+
+extern struct acpi_fixed_event_info
+ acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
+ACPI_EXTERN struct acpi_fixed_event_handler
+ acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS];
+ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
+ACPI_EXTERN struct acpi_gpe_block_info
+ *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
+
+/*****************************************************************************
+ *
+ * Debugger globals
+ *
+ ****************************************************************************/
+
+ACPI_EXTERN u8 acpi_gbl_db_output_flags;
+
+#ifdef ACPI_DISASSEMBLER
+
+ACPI_EXTERN u8 acpi_gbl_db_opt_disasm;
+ACPI_EXTERN u8 acpi_gbl_db_opt_verbose;
+#endif
+
+#ifdef ACPI_DEBUGGER
+
+extern u8 acpi_gbl_method_executing;
+extern u8 acpi_gbl_abort_method;
+extern u8 acpi_gbl_db_terminate_threads;
+
+ACPI_EXTERN int optind;
+ACPI_EXTERN char *optarg;
+
+ACPI_EXTERN u8 acpi_gbl_db_opt_tables;
+ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
+ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
+
+ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
+ACPI_EXTERN char acpi_gbl_db_line_buf[80];
+ACPI_EXTERN char acpi_gbl_db_parsed_buf[80];
+ACPI_EXTERN char acpi_gbl_db_scope_buf[40];
+ACPI_EXTERN char acpi_gbl_db_debug_filename[40];
+ACPI_EXTERN u8 acpi_gbl_db_output_to_file;
+ACPI_EXTERN char *acpi_gbl_db_buffer;
+ACPI_EXTERN char *acpi_gbl_db_filename;
+ACPI_EXTERN u32 acpi_gbl_db_debug_level;
+ACPI_EXTERN u32 acpi_gbl_db_console_debug_level;
+ACPI_EXTERN struct acpi_table_header *acpi_gbl_db_table_ptr;
+ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_db_scope_node;
+
+/*
+ * Statistic globals
+ */
+ACPI_EXTERN u16 acpi_gbl_obj_type_count[ACPI_TYPE_NS_NODE_MAX + 1];
+ACPI_EXTERN u16 acpi_gbl_node_type_count[ACPI_TYPE_NS_NODE_MAX + 1];
+ACPI_EXTERN u16 acpi_gbl_obj_type_count_misc;
+ACPI_EXTERN u16 acpi_gbl_node_type_count_misc;
+ACPI_EXTERN u32 acpi_gbl_num_nodes;
+ACPI_EXTERN u32 acpi_gbl_num_objects;
+
+ACPI_EXTERN u32 acpi_gbl_size_of_parse_tree;
+ACPI_EXTERN u32 acpi_gbl_size_of_method_trees;
+ACPI_EXTERN u32 acpi_gbl_size_of_node_entries;
+ACPI_EXTERN u32 acpi_gbl_size_of_acpi_objects;
+
+#endif /* ACPI_DEBUGGER */
+
+#endif /* __ACGLOBAL_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: achware.h - Hardware-specific interfaces
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACHWARE_H__
+#define __ACHWARE_H__
+
+/* PM Timer ticks per second (HZ) */
+
+#define PM_TIMER_FREQUENCY 3579545
+
+/* Values for the _SST reserved method */
+
+#define ACPI_SST_INDICATOR_OFF 0
+#define ACPI_SST_WORKING 1
+#define ACPI_SST_WAKING 2
+#define ACPI_SST_SLEEPING 3
+#define ACPI_SST_SLEEP_CONTEXT 4
+
+/* Prototypes */
+
+/*
+ * hwacpi - high level functions
+ */
+acpi_status acpi_hw_initialize(void);
+
+acpi_status acpi_hw_set_mode(u32 mode);
+
+u32 acpi_hw_get_mode(void);
+
+/*
+ * hwregs - ACPI Register I/O
+ */
+struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id);
+
+acpi_status
+acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value);
+
+acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value);
+
+acpi_status
+acpi_hw_low_level_read(u32 width,
+ u32 * value, struct acpi_generic_address *reg);
+
+acpi_status
+acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address *reg);
+
+acpi_status acpi_hw_clear_acpi_status(u32 flags);
+
+/*
+ * hwgpe - GPE support
+ */
+acpi_status
+acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info);
+
+acpi_status
+acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block);
+
+acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info *gpe_event_info);
+
+acpi_status
+acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block);
+
+#ifdef ACPI_FUTURE_USAGE
+acpi_status
+acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
+ acpi_event_status * event_status);
+#endif /* ACPI_FUTURE_USAGE */
+
+acpi_status acpi_hw_disable_all_gpes(void);
+
+acpi_status acpi_hw_enable_all_runtime_gpes(void);
+
+acpi_status acpi_hw_enable_all_wakeup_gpes(void);
+
+acpi_status
+acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block);
+
+#ifdef ACPI_FUTURE_USAGE
+/*
+ * hwtimer - ACPI Timer prototypes
+ */
+acpi_status acpi_get_timer_resolution(u32 * resolution);
+
+acpi_status acpi_get_timer(u32 * ticks);
+
+acpi_status
+acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed);
+#endif /* ACPI_FUTURE_USAGE */
+
+#endif /* __ACHWARE_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acinterp.h - Interpreter subcomponent prototypes and defines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACINTERP_H__
+#define __ACINTERP_H__
+
+#define ACPI_WALK_OPERANDS (&(walk_state->operands [walk_state->num_operands -1]))
+
+/* Macros for tables used for debug output */
+
+#define ACPI_EXD_OFFSET(f) (u8) ACPI_OFFSET (union acpi_operand_object,f)
+#define ACPI_EXD_NSOFFSET(f) (u8) ACPI_OFFSET (struct acpi_namespace_node,f)
+#define ACPI_EXD_TABLE_SIZE(name) (sizeof(name) / sizeof (struct acpi_exdump_info))
+
+/*
+ * If possible, pack the following structures to byte alignment, since we
+ * don't care about performance for debug output. Two cases where we cannot
+ * pack the structures:
+ *
+ * 1) Hardware does not support misaligned memory transfers
+ * 2) Compiler does not support pointers within packed structures
+ */
+#if (!defined(ACPI_MISALIGNMENT_NOT_SUPPORTED) && !defined(ACPI_PACKED_POINTERS_NOT_SUPPORTED))
+#pragma pack(1)
+#endif
+
+typedef const struct acpi_exdump_info {
+ u8 opcode;
+ u8 offset;
+ char *name;
+
+} acpi_exdump_info;
+
+/* Values for the Opcode field above */
+
+#define ACPI_EXD_INIT 0
+#define ACPI_EXD_TYPE 1
+#define ACPI_EXD_UINT8 2
+#define ACPI_EXD_UINT16 3
+#define ACPI_EXD_UINT32 4
+#define ACPI_EXD_UINT64 5
+#define ACPI_EXD_LITERAL 6
+#define ACPI_EXD_POINTER 7
+#define ACPI_EXD_ADDRESS 8
+#define ACPI_EXD_STRING 9
+#define ACPI_EXD_BUFFER 10
+#define ACPI_EXD_PACKAGE 11
+#define ACPI_EXD_FIELD 12
+#define ACPI_EXD_REFERENCE 13
+
+/* restore default alignment */
+
+#pragma pack()
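+
+/*
+ * A dump table is an array of acpi_exdump_info entries; the first entry is
+ * an ACPI_EXD_INIT record carrying the table size. A minimal, hypothetical
+ * table describing an integer object could look like:
+ *
+ *   static struct acpi_exdump_info acpi_ex_dump_integer[2] = {
+ *       {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_integer), NULL},
+ *       {ACPI_EXD_UINT64, ACPI_EXD_OFFSET(integer.value), "Value"}
+ *   };
+ *
+ * with ACPI_EXD_OFFSET resolving the field offset within
+ * union acpi_operand_object.
+ */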
+
+/*
+ * exconvrt - object conversion
+ */
+acpi_status
+acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
+ union acpi_operand_object **result_desc, u32 flags);
+
+acpi_status
+acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
+ union acpi_operand_object **result_desc);
+
+acpi_status
+acpi_ex_convert_to_string(union acpi_operand_object *obj_desc,
+ union acpi_operand_object **result_desc, u32 type);
+
+/* Types for ->String conversion */
+
+#define ACPI_EXPLICIT_BYTE_COPY 0x00000000
+#define ACPI_EXPLICIT_CONVERT_HEX 0x00000001
+#define ACPI_IMPLICIT_CONVERT_HEX 0x00000002
+#define ACPI_EXPLICIT_CONVERT_DECIMAL 0x00000003
+
+acpi_status
+acpi_ex_convert_to_target_type(acpi_object_type destination_type,
+ union acpi_operand_object *source_desc,
+ union acpi_operand_object **result_desc,
+ struct acpi_walk_state *walk_state);
+
+/*
+ * exfield - ACPI AML (p-code) execution - field manipulation
+ */
+acpi_status
+acpi_ex_common_buffer_setup(union acpi_operand_object *obj_desc,
+ u32 buffer_length, u32 * datum_count);
+
+acpi_status
+acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
+ acpi_integer mask,
+ acpi_integer field_value,
+ u32 field_datum_byte_offset);
+
+void
+acpi_ex_get_buffer_datum(acpi_integer * datum,
+ void *buffer,
+ u32 buffer_length,
+ u32 byte_granularity, u32 buffer_offset);
+
+void
+acpi_ex_set_buffer_datum(acpi_integer merged_datum,
+ void *buffer,
+ u32 buffer_length,
+ u32 byte_granularity, u32 buffer_offset);
+
+acpi_status
+acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
+ union acpi_operand_object *obj_desc,
+ union acpi_operand_object **ret_buffer_desc);
+
+acpi_status
+acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
+ union acpi_operand_object *obj_desc,
+ union acpi_operand_object **result_desc);
+
+/*
+ * exfldio - low level field I/O
+ */
+acpi_status
+acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
+ void *buffer, u32 buffer_length);
+
+acpi_status
+acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
+ void *buffer, u32 buffer_length);
+
+acpi_status
+acpi_ex_access_region(union acpi_operand_object *obj_desc,
+ u32 field_datum_byte_offset,
+ acpi_integer * value, u32 read_write);
+
+/*
+ * exmisc - misc support routines
+ */
+acpi_status
+acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
+ union acpi_operand_object **return_desc,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ex_concat_template(union acpi_operand_object *obj_desc,
+ union acpi_operand_object *obj_desc2,
+ union acpi_operand_object **actual_return_desc,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ex_do_concatenate(union acpi_operand_object *obj_desc,
+ union acpi_operand_object *obj_desc2,
+ union acpi_operand_object **actual_return_desc,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ex_do_logical_numeric_op(u16 opcode,
+ acpi_integer integer0,
+ acpi_integer integer1, u8 * logical_result);
+
+acpi_status
+acpi_ex_do_logical_op(u16 opcode,
+ union acpi_operand_object *operand0,
+ union acpi_operand_object *operand1, u8 * logical_result);
+
+acpi_integer
+acpi_ex_do_math_op(u16 opcode, acpi_integer operand0, acpi_integer operand1);
+
+acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state);
+
+acpi_status acpi_ex_create_processor(struct acpi_walk_state *walk_state);
+
+acpi_status acpi_ex_create_power_resource(struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ex_create_region(u8 * aml_start,
+ u32 aml_length,
+ u8 region_space, struct acpi_walk_state *walk_state);
+
+acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state);
+
+acpi_status acpi_ex_create_event(struct acpi_walk_state *walk_state);
+
+acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ex_create_method(u8 * aml_start,
+ u32 aml_length, struct acpi_walk_state *walk_state);
+
+/*
+ * exconfig - dynamic table load/unload
+ */
+acpi_status
+acpi_ex_load_op(union acpi_operand_object *obj_desc,
+ union acpi_operand_object *target,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
+ union acpi_operand_object **return_desc);
+
+acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle);
+
+/*
+ * exmutex - mutex support
+ */
+acpi_status
+acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
+ union acpi_operand_object *obj_desc,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
+ struct acpi_walk_state *walk_state);
+
+void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread);
+
+void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc);
+
+/*
+ * exprep - ACPI AML execution - prep utilities
+ */
+acpi_status
+acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
+ u8 field_flags,
+ u8 field_attribute,
+ u32 field_bit_position, u32 field_bit_length);
+
+acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info);
+
+/*
+ * exsystem - Interface to OS services
+ */
+acpi_status
+acpi_ex_system_do_notify_op(union acpi_operand_object *value,
+ union acpi_operand_object *obj_desc);
+
+acpi_status acpi_ex_system_do_suspend(acpi_integer time);
+
+acpi_status acpi_ex_system_do_stall(u32 time);
+
+acpi_status
+acpi_ex_system_acquire_mutex(union acpi_operand_object *time,
+ union acpi_operand_object *obj_desc);
+
+acpi_status acpi_ex_system_release_mutex(union acpi_operand_object *obj_desc);
+
+acpi_status acpi_ex_system_signal_event(union acpi_operand_object *obj_desc);
+
+acpi_status
+acpi_ex_system_wait_event(union acpi_operand_object *time,
+ union acpi_operand_object *obj_desc);
+
+acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc);
+
+acpi_status
+acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout);
+
+acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout);
+
+/*
+ * exoparg1 - ACPI AML execution, 1 operand
+ */
+acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state);
+
+acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state);
+
+acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state);
+
+acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state);
+
+acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state);
+
+/*
+ * exoparg2 - ACPI AML execution, 2 operands
+ */
+acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state);
+
+acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state);
+
+acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state);
+
+acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state);
+
+/*
+ * exoparg3 - ACPI AML execution, 3 operands
+ */
+acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state);
+
+acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state);
+
+/*
+ * exoparg6 - ACPI AML execution, 6 operands
+ */
+acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state *walk_state);
+
+/*
+ * exresolv - Object resolution and get value functions
+ */
+acpi_status
+acpi_ex_resolve_to_value(union acpi_operand_object **stack_ptr,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
+ union acpi_operand_object *operand,
+ acpi_object_type * return_type,
+ union acpi_operand_object **return_desc);
+
+/*
+ * exresnte - resolve namespace node
+ */
+acpi_status
+acpi_ex_resolve_node_to_value(struct acpi_namespace_node **stack_ptr,
+ struct acpi_walk_state *walk_state);
+
+/*
+ * exresop - resolve operand to value
+ */
+acpi_status
+acpi_ex_resolve_operands(u16 opcode,
+ union acpi_operand_object **stack_ptr,
+ struct acpi_walk_state *walk_state);
+
+/*
+ * exdump - Interpreter debug output routines
+ */
+void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth);
+
+void
+acpi_ex_dump_operands(union acpi_operand_object **operands,
+ acpi_interpreter_mode interpreter_mode,
+ char *ident,
+ u32 num_levels,
+ char *note, char *module_name, u32 line_number);
+
+#ifdef ACPI_FUTURE_USAGE
+void
+acpi_ex_dump_object_descriptor(union acpi_operand_object *object, u32 flags);
+
+void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags);
+#endif /* ACPI_FUTURE_USAGE */
+
+/*
+ * exnames - AML namestring support
+ */
+acpi_status
+acpi_ex_get_name_string(acpi_object_type data_type,
+ u8 * in_aml_address,
+ char **out_name_string, u32 * out_name_length);
+
+/*
+ * exstore - Object store support
+ */
+acpi_status
+acpi_ex_store(union acpi_operand_object *val_desc,
+ union acpi_operand_object *dest_desc,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
+ struct acpi_namespace_node *node,
+ struct acpi_walk_state *walk_state,
+ u8 implicit_conversion);
+
+#define ACPI_IMPLICIT_CONVERSION TRUE
+#define ACPI_NO_IMPLICIT_CONVERSION FALSE
+
+/*
+ * exstoren - resolve/store object
+ */
+acpi_status
+acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
+ acpi_object_type target_type,
+ struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ex_store_object_to_object(union acpi_operand_object *source_desc,
+ union acpi_operand_object *dest_desc,
+ union acpi_operand_object **new_desc,
+ struct acpi_walk_state *walk_state);
+
+/*
+ * exstorob - store object - buffer/string
+ */
+acpi_status
+acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
+ union acpi_operand_object *target_desc);
+
+acpi_status
+acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
+ union acpi_operand_object *target_desc);
+
+/*
+ * excopy - object copy
+ */
+acpi_status
+acpi_ex_copy_integer_to_index_field(union acpi_operand_object *source_desc,
+ union acpi_operand_object *target_desc);
+
+acpi_status
+acpi_ex_copy_integer_to_bank_field(union acpi_operand_object *source_desc,
+ union acpi_operand_object *target_desc);
+
+acpi_status
+acpi_ex_copy_data_to_named_field(union acpi_operand_object *source_desc,
+ struct acpi_namespace_node *node);
+
+acpi_status
+acpi_ex_copy_integer_to_buffer_field(union acpi_operand_object *source_desc,
+ union acpi_operand_object *target_desc);
+
+/*
+ * exutils - interpreter/scanner utilities
+ */
+acpi_status acpi_ex_enter_interpreter(void);
+
+void acpi_ex_exit_interpreter(void);
+
+void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc);
+
+u8 acpi_ex_acquire_global_lock(u32 rule);
+
+void acpi_ex_release_global_lock(u8 locked);
+
+void acpi_ex_eisa_id_to_string(u32 numeric_id, char *out_string);
+
+void acpi_ex_unsigned_integer_to_string(acpi_integer value, char *out_string);
+
+/*
+ * exregion - default op_region handlers
+ */
+acpi_status
+acpi_ex_system_memory_space_handler(u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer * value,
+ void *handler_context,
+ void *region_context);
+
+acpi_status
+acpi_ex_system_io_space_handler(u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer * value,
+ void *handler_context, void *region_context);
+
+acpi_status
+acpi_ex_pci_config_space_handler(u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer * value,
+ void *handler_context, void *region_context);
+
+acpi_status
+acpi_ex_cmos_space_handler(u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer * value,
+ void *handler_context, void *region_context);
+
+acpi_status
+acpi_ex_pci_bar_space_handler(u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer * value,
+ void *handler_context, void *region_context);
+
+acpi_status
+acpi_ex_embedded_controller_space_handler(u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer * value,
+ void *handler_context,
+ void *region_context);
+
+acpi_status
+acpi_ex_sm_bus_space_handler(u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer * value,
+ void *handler_context, void *region_context);
+
+acpi_status
+acpi_ex_data_table_space_handler(u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer * value,
+ void *handler_context, void *region_context);
+
+#endif /* __ACINTERP_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: aclocal.h - Internal data types used across the ACPI subsystem
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACLOCAL_H__
+#define __ACLOCAL_H__
+
+/* acpisrc:struct_defs -- for acpisrc conversion */
+
+#define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */
+#define ACPI_DO_NOT_WAIT 0
+#define ACPI_SERIALIZED 0xFF
+
+typedef u32 acpi_mutex_handle;
+#define ACPI_GLOBAL_LOCK (acpi_semaphore) (-1)
+
+/* Total number of aml opcodes defined */
+
+#define AML_NUM_OPCODES 0x7F
+
+/* Forward declarations */
+
+struct acpi_walk_state;
+struct acpi_obj_mutex;
+union acpi_parse_object;
+
+/*****************************************************************************
+ *
+ * Mutex typedefs and structs
+ *
+ ****************************************************************************/
+
+/*
+ * Predefined handles for the mutex objects used within the subsystem
+ * All mutex objects are automatically created by acpi_ut_mutex_initialize.
+ *
+ * The acquire/release ordering protocol is implied via this list. Mutexes
+ * with a lower value must be acquired before mutexes with a higher value.
+ *
+ * NOTE: any changes here must be reflected in the acpi_gbl_mutex_names
+ * table below also!
+ */
+#define ACPI_MTX_INTERPRETER 0 /* AML Interpreter, main lock */
+#define ACPI_MTX_TABLES 1 /* Data for ACPI tables */
+#define ACPI_MTX_NAMESPACE 2 /* ACPI Namespace */
+#define ACPI_MTX_EVENTS 3 /* Data for ACPI events */
+#define ACPI_MTX_CACHES 4 /* Internal caches, general purposes */
+#define ACPI_MTX_MEMORY 5 /* Debug memory tracking lists */
+#define ACPI_MTX_DEBUG_CMD_COMPLETE 6 /* AML debugger */
+#define ACPI_MTX_DEBUG_CMD_READY 7 /* AML debugger */
+
+#define ACPI_MAX_MUTEX 7
+#define ACPI_NUM_MUTEX ACPI_MAX_MUTEX+1
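+
+/*
+ * Example of the ordering rule above: a thread that already holds
+ * ACPI_MTX_TABLES (1) may go on to acquire ACPI_MTX_NAMESPACE (2), but
+ * acquiring ACPI_MTX_TABLES while holding ACPI_MTX_NAMESPACE violates the
+ * protocol and risks deadlock against a thread taking the mutexes in the
+ * documented order.
+ */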
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+#ifdef DEFINE_ACPI_GLOBALS
+
+/* Debug names for the mutexes above */
+
+static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {
+ "ACPI_MTX_Interpreter",
+ "ACPI_MTX_Tables",
+ "ACPI_MTX_Namespace",
+ "ACPI_MTX_Events",
+ "ACPI_MTX_Caches",
+ "ACPI_MTX_Memory",
+ "ACPI_MTX_CommandComplete",
+ "ACPI_MTX_CommandReady"
+};
+
+#endif
+#endif
+
+/*
+ * Predefined handles for spinlocks used within the subsystem.
+ * These spinlocks are created by acpi_ut_mutex_initialize
+ */
+#define ACPI_LOCK_GPES 0
+#define ACPI_LOCK_HARDWARE 1
+
+#define ACPI_MAX_LOCK 1
+#define ACPI_NUM_LOCK ACPI_MAX_LOCK+1
+
+/* Owner IDs are used to track namespace nodes for selective deletion */
+
+typedef u8 acpi_owner_id;
+#define ACPI_OWNER_ID_MAX 0xFF
+
+/* This Thread ID means that the mutex is not in use (unlocked) */
+
+#define ACPI_MUTEX_NOT_ACQUIRED (acpi_thread_id) 0
+
+/* Table for the global mutexes */
+
+struct acpi_mutex_info {
+ acpi_mutex mutex;
+ u32 use_count;
+ acpi_thread_id thread_id;
+};
+
+/* Lock flag parameter for various interfaces */
+
+#define ACPI_MTX_DO_NOT_LOCK 0
+#define ACPI_MTX_LOCK 1
+
+/* Field access granularities */
+
+#define ACPI_FIELD_BYTE_GRANULARITY 1
+#define ACPI_FIELD_WORD_GRANULARITY 2
+#define ACPI_FIELD_DWORD_GRANULARITY 4
+#define ACPI_FIELD_QWORD_GRANULARITY 8
+
+#define ACPI_ENTRY_NOT_FOUND NULL
+
+/*****************************************************************************
+ *
+ * Namespace typedefs and structs
+ *
+ ****************************************************************************/
+
+/* Operational modes of the AML interpreter/scanner */
+
+typedef enum {
+ ACPI_IMODE_LOAD_PASS1 = 0x01,
+ ACPI_IMODE_LOAD_PASS2 = 0x02,
+ ACPI_IMODE_EXECUTE = 0x0E
+} acpi_interpreter_mode;
+
+union acpi_name_union {
+ u32 integer;
+ char ascii[4];
+};
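+
+/*
+ * Illustrative usage sketch: the union lets a 4-character ACPI name be
+ * handled either as text or as one 32-bit integer, e.g.:
+ *
+ *     union acpi_name_union name;
+ *     name.ascii[0] = '_'; name.ascii[1] = 'S';
+ *     name.ascii[2] = 'B'; name.ascii[3] = '_';
+ *     if (name.integer == *(u32 *) "_SB_") { ...matches the system bus... }
+ *
+ * The raw pointer compare is a sketch only; real code is expected to use
+ * the ACPI_COMPARE_NAME macro so that alignment restrictions are honored.
+ */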
+
+/*
+ * The Namespace Node describes a named object that appears in the AML.
+ * descriptor_type is used to differentiate between internal descriptors.
+ *
+ * The node is optimized for both 32-bit and 64-bit platforms:
+ * 20 bytes for the 32-bit case, 32 bytes for the 64-bit case.
+ *
+ * Note: The descriptor_type and Type fields must appear in the identical
+ * position in both the struct acpi_namespace_node and union acpi_operand_object
+ * structures.
+ */
+struct acpi_namespace_node {
+ union acpi_operand_object *object; /* Interpreter object */
+ u8 descriptor_type; /* Differentiate object descriptor types */
+ u8 type; /* ACPI Type associated with this name */
+ u8 flags; /* Miscellaneous flags */
+ acpi_owner_id owner_id; /* Node creator */
+ union acpi_name_union name; /* ACPI Name, always 4 chars per ACPI spec */
+ struct acpi_namespace_node *child; /* First child */
+ struct acpi_namespace_node *peer; /* Peer. Parent if ANOBJ_END_OF_PEER_LIST set */
+
+ /*
+ * The following fields are used by the ASL compiler and disassembler only
+ */
+#ifdef ACPI_LARGE_NAMESPACE_NODE
+ union acpi_parse_object *op;
+ u32 value;
+ u32 length;
+#endif
+};
+
+/* Namespace Node flags */
+
+#define ANOBJ_END_OF_PEER_LIST 0x01 /* End-of-list, Peer field points to parent */
+#define ANOBJ_RESERVED 0x02 /* Available for future use */
+#define ANOBJ_METHOD_ARG 0x04 /* Node is a method argument */
+#define ANOBJ_METHOD_LOCAL 0x08 /* Node is a method local */
+#define ANOBJ_SUBTREE_HAS_INI 0x10 /* Used to optimize device initialization */
+
+#define ANOBJ_IS_EXTERNAL 0x08 /* i_aSL only: This object created via External() */
+#define ANOBJ_METHOD_NO_RETVAL 0x10 /* i_aSL only: Method has no return value */
+#define ANOBJ_METHOD_SOME_NO_RETVAL 0x20 /* i_aSL only: Method has at least one return value */
+#define ANOBJ_IS_BIT_OFFSET 0x40 /* i_aSL only: Reference is a bit offset */
+#define ANOBJ_IS_REFERENCED 0x80 /* i_aSL only: Object was referenced */
+
+/*
+ * ACPI Table Descriptor. One per ACPI table
+ */
+struct acpi_table_desc {
+ struct acpi_table_desc *prev;
+ struct acpi_table_desc *next;
+ struct acpi_table_desc *installed_desc;
+ struct acpi_table_header *pointer;
+ u8 *aml_start;
+ u64 physical_address;
+ acpi_size length;
+ u32 aml_length;
+ acpi_owner_id owner_id;
+ u8 type;
+ u8 allocation;
+ u8 loaded_into_namespace;
+};
+
+struct acpi_table_list {
+ struct acpi_table_desc *next;
+ u32 count;
+};
+
+struct acpi_find_context {
+ char *search_for;
+ acpi_handle *list;
+ u32 *count;
+};
+
+struct acpi_ns_search_data {
+ struct acpi_namespace_node *node;
+};
+
+/*
+ * Predefined Namespace items
+ */
+struct acpi_predefined_names {
+ char *name;
+ u8 type;
+ char *val;
+};
+
+/* Object types used during package copies */
+
+#define ACPI_COPY_TYPE_SIMPLE 0
+#define ACPI_COPY_TYPE_PACKAGE 1
+
+/* Info structure used to convert external<->internal namestrings */
+
+struct acpi_namestring_info {
+ char *external_name;
+ char *next_external_char;
+ char *internal_name;
+ u32 length;
+ u32 num_segments;
+ u32 num_carats;
+ u8 fully_qualified;
+};
+
+/* Field creation info */
+
+struct acpi_create_field_info {
+ struct acpi_namespace_node *region_node;
+ struct acpi_namespace_node *field_node;
+ struct acpi_namespace_node *register_node;
+ struct acpi_namespace_node *data_register_node;
+ u32 bank_value;
+ u32 field_bit_position;
+ u32 field_bit_length;
+ u8 field_flags;
+ u8 attribute;
+ u8 field_type;
+};
+
+typedef
+acpi_status(*ACPI_INTERNAL_METHOD) (struct acpi_walk_state * walk_state);
+
+/*
+ * Bitmapped ACPI types. Used internally only
+ */
+#define ACPI_BTYPE_ANY 0x00000000
+#define ACPI_BTYPE_INTEGER 0x00000001
+#define ACPI_BTYPE_STRING 0x00000002
+#define ACPI_BTYPE_BUFFER 0x00000004
+#define ACPI_BTYPE_PACKAGE 0x00000008
+#define ACPI_BTYPE_FIELD_UNIT 0x00000010
+#define ACPI_BTYPE_DEVICE 0x00000020
+#define ACPI_BTYPE_EVENT 0x00000040
+#define ACPI_BTYPE_METHOD 0x00000080
+#define ACPI_BTYPE_MUTEX 0x00000100
+#define ACPI_BTYPE_REGION 0x00000200
+#define ACPI_BTYPE_POWER 0x00000400
+#define ACPI_BTYPE_PROCESSOR 0x00000800
+#define ACPI_BTYPE_THERMAL 0x00001000
+#define ACPI_BTYPE_BUFFER_FIELD 0x00002000
+#define ACPI_BTYPE_DDB_HANDLE 0x00004000
+#define ACPI_BTYPE_DEBUG_OBJECT 0x00008000
+#define ACPI_BTYPE_REFERENCE 0x00010000
+#define ACPI_BTYPE_RESOURCE 0x00020000
+
+#define ACPI_BTYPE_COMPUTE_DATA (ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING | ACPI_BTYPE_BUFFER)
+
+#define ACPI_BTYPE_DATA (ACPI_BTYPE_COMPUTE_DATA | ACPI_BTYPE_PACKAGE)
+#define ACPI_BTYPE_DATA_REFERENCE (ACPI_BTYPE_DATA | ACPI_BTYPE_REFERENCE | ACPI_BTYPE_DDB_HANDLE)
+#define ACPI_BTYPE_DEVICE_OBJECTS (ACPI_BTYPE_DEVICE | ACPI_BTYPE_THERMAL | ACPI_BTYPE_PROCESSOR)
+#define ACPI_BTYPE_OBJECTS_AND_REFS 0x0001FFFF /* ARG or LOCAL */
+#define ACPI_BTYPE_ALL_OBJECTS 0x0000FFFF
+
+/*****************************************************************************
+ *
+ * Event typedefs and structs
+ *
+ ****************************************************************************/
+
+/* Dispatch info for each GPE -- either a method or handler, cannot be both */
+
+struct acpi_handler_info {
+ acpi_event_handler address; /* Address of handler, if any */
+ void *context; /* Context to be passed to handler */
+ struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */
+};
+
+union acpi_gpe_dispatch_info {
+ struct acpi_namespace_node *method_node; /* Method node for this GPE level */
+ struct acpi_handler_info *handler;
+};
+
+/*
+ * Information about a GPE, one per each GPE in an array.
+ * NOTE: Important to keep this struct as small as possible.
+ */
+struct acpi_gpe_event_info {
+ union acpi_gpe_dispatch_info dispatch; /* Either Method or Handler */
+ struct acpi_gpe_register_info *register_info; /* Backpointer to register info */
+ u8 flags; /* Misc info about this GPE */
+ u8 register_bit; /* This GPE bit within the register */
+};
+
+/* Information about a GPE register pair, one per each status/enable pair in an array */
+
+struct acpi_gpe_register_info {
+ struct acpi_generic_address status_address; /* Address of status reg */
+ struct acpi_generic_address enable_address; /* Address of enable reg */
+ u8 enable_for_wake; /* GPEs to keep enabled when sleeping */
+ u8 enable_for_run; /* GPEs to keep enabled when running */
+ u8 base_gpe_number; /* Base GPE number for this register */
+};
+
+/*
+ * Information about a GPE register block, one per each installed block --
+ * GPE0, GPE1, and one per each installed GPE Block Device.
+ */
+struct acpi_gpe_block_info {
+ struct acpi_namespace_node *node;
+ struct acpi_gpe_block_info *previous;
+ struct acpi_gpe_block_info *next;
+ struct acpi_gpe_xrupt_info *xrupt_block; /* Backpointer to interrupt block */
+ struct acpi_gpe_register_info *register_info; /* One per GPE register pair */
+ struct acpi_gpe_event_info *event_info; /* One for each GPE */
+ struct acpi_generic_address block_address; /* Base address of the block */
+ u32 register_count; /* Number of register pairs in block */
+ u8 block_base_number; /* Base GPE number for this block */
+};
+
+/* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */
+
+struct acpi_gpe_xrupt_info {
+ struct acpi_gpe_xrupt_info *previous;
+ struct acpi_gpe_xrupt_info *next;
+ struct acpi_gpe_block_info *gpe_block_list_head; /* List of GPE blocks for this xrupt */
+ u32 interrupt_number; /* System interrupt number */
+};
+
+struct acpi_gpe_walk_info {
+ struct acpi_namespace_node *gpe_device;
+ struct acpi_gpe_block_info *gpe_block;
+};
+
+typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *
+ gpe_xrupt_info,
+ struct acpi_gpe_block_info *
+ gpe_block);
+
+/* Information about each particular fixed event */
+
+struct acpi_fixed_event_handler {
+ acpi_event_handler handler; /* Address of handler. */
+ void *context; /* Context to be passed to handler */
+};
+
+struct acpi_fixed_event_info {
+ u8 status_register_id;
+ u8 enable_register_id;
+ u16 status_bit_mask;
+ u16 enable_bit_mask;
+};
+
+/* Information used during field processing */
+
+struct acpi_field_info {
+ u8 skip_field;
+ u8 field_flag;
+ u32 pkg_length;
+};
+
+/*****************************************************************************
+ *
+ * Generic "state" object for stacks
+ *
+ ****************************************************************************/
+
+#define ACPI_CONTROL_NORMAL 0xC0
+#define ACPI_CONTROL_CONDITIONAL_EXECUTING 0xC1
+#define ACPI_CONTROL_PREDICATE_EXECUTING 0xC2
+#define ACPI_CONTROL_PREDICATE_FALSE 0xC3
+#define ACPI_CONTROL_PREDICATE_TRUE 0xC4
+
+#define ACPI_STATE_COMMON \
+ void *next; \
+ u8 descriptor_type; /* To differentiate various internal objs */\
+ u8 flags; \
+ u16 value; \
+ u16 state;
+
+ /* There are 2 bytes available here until the next natural alignment boundary */
+
+struct acpi_common_state {
+ACPI_STATE_COMMON};
+
+/*
+ * Update state - used to traverse complex objects such as packages
+ */
+struct acpi_update_state {
+ ACPI_STATE_COMMON union acpi_operand_object *object;
+};
+
+/*
+ * Pkg state - used to traverse nested package structures
+ */
+struct acpi_pkg_state {
+ ACPI_STATE_COMMON u16 index;
+ union acpi_operand_object *source_object;
+ union acpi_operand_object *dest_object;
+ struct acpi_walk_state *walk_state;
+ void *this_target_obj;
+ u32 num_packages;
+};
+
+/*
+ * Control state - one per if/else and while constructs.
+ * Allows nesting of these constructs
+ */
+struct acpi_control_state {
+ ACPI_STATE_COMMON u16 opcode;
+ union acpi_parse_object *predicate_op;
+ u8 *aml_predicate_start; /* Start of if/while predicate */
+ u8 *package_end; /* End of if/while block */
+};
+
+/*
+ * Scope state - current scope during namespace lookups
+ */
+struct acpi_scope_state {
+ ACPI_STATE_COMMON struct acpi_namespace_node *node;
+};
+
+struct acpi_pscope_state {
+ ACPI_STATE_COMMON u32 arg_count; /* Number of fixed arguments */
+ union acpi_parse_object *op; /* Current op being parsed */
+ u8 *arg_end; /* Current argument end */
+ u8 *pkg_end; /* Current package end */
+ u32 arg_list; /* Next argument to parse */
+};
+
+/*
+ * Thread state - one per thread across multiple walk states. Multiple walk
+ * states are created when there are nested control methods executing.
+ */
+struct acpi_thread_state {
+ ACPI_STATE_COMMON u8 current_sync_level; /* Mutex Sync (nested acquire) level */
+ struct acpi_walk_state *walk_state_list; /* Head of list of walk_states for this thread */
+ union acpi_operand_object *acquired_mutex_list; /* List of all currently acquired mutexes */
+ acpi_thread_id thread_id; /* Running thread ID */
+};
+
+/*
+ * Result values - used to accumulate the results of nested
+ * AML arguments
+ */
+struct acpi_result_values {
+ ACPI_STATE_COMMON u8 num_results;
+ u8 last_insert;
+ union acpi_operand_object *obj_desc[ACPI_OBJ_NUM_OPERANDS];
+};
+
+typedef
+acpi_status(*acpi_parse_downwards) (struct acpi_walk_state * walk_state,
+ union acpi_parse_object ** out_op);
+
+typedef acpi_status(*acpi_parse_upwards) (struct acpi_walk_state * walk_state);
+
+/*
+ * Notify info - used to pass info to the deferred notify
+ * handler/dispatcher.
+ */
+struct acpi_notify_info {
+ ACPI_STATE_COMMON struct acpi_namespace_node *node;
+ union acpi_operand_object *handler_obj;
+};
+
+/* Generic state is union of structs above */
+
+union acpi_generic_state {
+ struct acpi_common_state common;
+ struct acpi_control_state control;
+ struct acpi_update_state update;
+ struct acpi_scope_state scope;
+ struct acpi_pscope_state parse_scope;
+ struct acpi_pkg_state pkg;
+ struct acpi_thread_state thread;
+ struct acpi_result_values results;
+ struct acpi_notify_info notify;
+};
+
+/*****************************************************************************
+ *
+ * Interpreter typedefs and structs
+ *
+ ****************************************************************************/
+
+typedef acpi_status(*ACPI_EXECUTE_OP) (struct acpi_walk_state * walk_state);
+
+/*****************************************************************************
+ *
+ * Parser typedefs and structs
+ *
+ ****************************************************************************/
+
+/*
+ * AML opcode, name, and argument layout
+ */
+struct acpi_opcode_info {
+#if defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUG_OUTPUT)
+ char *name; /* Opcode name (disassembler/debug only) */
+#endif
+ u32 parse_args; /* Grammar/Parse time arguments */
+ u32 runtime_args; /* Interpret time arguments */
+ u16 flags; /* Misc flags */
+ u8 object_type; /* Corresponding internal object type */
+ u8 class; /* Opcode class */
+ u8 type; /* Opcode type */
+};
+
+union acpi_parse_value {
+ acpi_integer integer; /* Integer constant (Up to 64 bits) */
+ struct uint64_struct integer64; /* Structure overlay for 2 32-bit Dwords */
+ u32 size; /* bytelist or field size */
+ char *string; /* NULL terminated string */
+ u8 *buffer; /* buffer or string */
+ char *name; /* NULL terminated string */
+ union acpi_parse_object *arg; /* arguments and contained ops */
+};
+
+#define ACPI_PARSE_COMMON \
+ union acpi_parse_object *parent; /* Parent op */\
+ u8 descriptor_type; /* To differentiate various internal objs */\
+ u8 flags; /* Type of Op */\
+ u16 aml_opcode; /* AML opcode */\
+ u32 aml_offset; /* Offset of declaration in AML */\
+ union acpi_parse_object *next; /* Next op */\
+ struct acpi_namespace_node *node; /* For use by interpreter */\
+ union acpi_parse_value value; /* Value or args associated with the opcode */\
+ ACPI_DISASM_ONLY_MEMBERS (\
+ u8 disasm_flags; /* Used during AML disassembly */\
+ u8 disasm_opcode; /* Subtype used for disassembly */\
+ char aml_op_name[16]) /* Op name (debug only) */
+
+#define ACPI_DASM_BUFFER 0x00
+#define ACPI_DASM_RESOURCE 0x01
+#define ACPI_DASM_STRING 0x02
+#define ACPI_DASM_UNICODE 0x03
+#define ACPI_DASM_EISAID 0x04
+#define ACPI_DASM_MATCHOP 0x05
+#define ACPI_DASM_LNOT_PREFIX 0x06
+#define ACPI_DASM_LNOT_SUFFIX 0x07
+#define ACPI_DASM_IGNORE 0x08
+
+/*
+ * Generic operation (for example: If, While, Store)
+ */
+struct acpi_parse_obj_common {
+ACPI_PARSE_COMMON};
+
+/*
+ * Extended Op for named ops (Scope, Method, etc.), deferred ops (Methods and op_regions),
+ * and bytelists.
+ */
+struct acpi_parse_obj_named {
+ ACPI_PARSE_COMMON u8 * path;
+ u8 *data; /* AML body or bytelist data */
+ u32 length; /* AML length */
+ u32 name; /* 4-byte name or zero if no name */
+};
+
+/* This version is used by the i_aSL compiler only */
+
+#define ACPI_MAX_PARSEOP_NAME 20
+
+struct acpi_parse_obj_asl {
+ ACPI_PARSE_COMMON union acpi_parse_object *child;
+ union acpi_parse_object *parent_method;
+ char *filename;
+ char *external_name;
+ char *namepath;
+ char name_seg[4];
+ u32 extra_value;
+ u32 column;
+ u32 line_number;
+ u32 logical_line_number;
+ u32 logical_byte_offset;
+ u32 end_line;
+ u32 end_logical_line;
+ u32 acpi_btype;
+ u32 aml_length;
+ u32 aml_subtree_length;
+ u32 final_aml_length;
+ u32 final_aml_offset;
+ u32 compile_flags;
+ u16 parse_opcode;
+ u8 aml_opcode_length;
+ u8 aml_pkg_len_bytes;
+ u8 extra;
+ char parse_op_name[ACPI_MAX_PARSEOP_NAME];
+};
+
+union acpi_parse_object {
+ struct acpi_parse_obj_common common;
+ struct acpi_parse_obj_named named;
+ struct acpi_parse_obj_asl asl;
+};
+
+/*
+ * Parse state - one state per parser invocation and each control
+ * method.
+ */
+struct acpi_parse_state {
+ u8 *aml_start; /* First AML byte */
+ u8 *aml; /* Next AML byte */
+ u8 *aml_end; /* (last + 1) AML byte */
+ u8 *pkg_start; /* Current package begin */
+ u8 *pkg_end; /* Current package end */
+ union acpi_parse_object *start_op; /* Root of parse tree */
+ struct acpi_namespace_node *start_node;
+ union acpi_generic_state *scope; /* Current scope */
+ union acpi_parse_object *start_scope;
+ u32 aml_size;
+};
+
+/* Parse object flags */
+
+#define ACPI_PARSEOP_GENERIC 0x01
+#define ACPI_PARSEOP_NAMED 0x02
+#define ACPI_PARSEOP_DEFERRED 0x04
+#define ACPI_PARSEOP_BYTELIST 0x08
+#define ACPI_PARSEOP_IN_CACHE 0x80
+
+/* Parse object disasm_flags */
+
+#define ACPI_PARSEOP_IGNORE 0x01
+#define ACPI_PARSEOP_PARAMLIST 0x02
+#define ACPI_PARSEOP_EMPTY_TERMLIST 0x04
+#define ACPI_PARSEOP_SPECIAL 0x10
+
+/*****************************************************************************
+ *
+ * Hardware (ACPI registers) and PNP
+ *
+ ****************************************************************************/
+
+#define PCI_ROOT_HID_STRING "PNP0A03"
+#define PCI_EXPRESS_ROOT_HID_STRING "PNP0A08"
+
+struct acpi_bit_register_info {
+ u8 parent_register;
+ u8 bit_position;
+ u16 access_bit_mask;
+};
+
+/*
+ * Some ACPI registers have bits that must be ignored -- meaning that they
+ * must be preserved.
+ */
+#define ACPI_PM1_STATUS_PRESERVED_BITS 0x0800 /* Bit 11 */
+#define ACPI_PM1_CONTROL_PRESERVED_BITS 0x0201 /* Bit 9, Bit 0 (SCI_EN) */
+
+/*
+ * Register IDs
+ * These are the full ACPI registers
+ */
+#define ACPI_REGISTER_PM1_STATUS 0x01
+#define ACPI_REGISTER_PM1_ENABLE 0x02
+#define ACPI_REGISTER_PM1_CONTROL 0x03
+#define ACPI_REGISTER_PM1A_CONTROL 0x04
+#define ACPI_REGISTER_PM1B_CONTROL 0x05
+#define ACPI_REGISTER_PM2_CONTROL 0x06
+#define ACPI_REGISTER_PM_TIMER 0x07
+#define ACPI_REGISTER_PROCESSOR_BLOCK 0x08
+#define ACPI_REGISTER_SMI_COMMAND_BLOCK 0x09
+
+/* Masks used to access the bit_registers */
+
+#define ACPI_BITMASK_TIMER_STATUS 0x0001
+#define ACPI_BITMASK_BUS_MASTER_STATUS 0x0010
+#define ACPI_BITMASK_GLOBAL_LOCK_STATUS 0x0020
+#define ACPI_BITMASK_POWER_BUTTON_STATUS 0x0100
+#define ACPI_BITMASK_SLEEP_BUTTON_STATUS 0x0200
+#define ACPI_BITMASK_RT_CLOCK_STATUS 0x0400
+#define ACPI_BITMASK_PCIEXP_WAKE_STATUS 0x4000 /* ACPI 3.0 */
+#define ACPI_BITMASK_WAKE_STATUS 0x8000
+
+#define ACPI_BITMASK_ALL_FIXED_STATUS (\
+ ACPI_BITMASK_TIMER_STATUS | \
+ ACPI_BITMASK_BUS_MASTER_STATUS | \
+ ACPI_BITMASK_GLOBAL_LOCK_STATUS | \
+ ACPI_BITMASK_POWER_BUTTON_STATUS | \
+ ACPI_BITMASK_SLEEP_BUTTON_STATUS | \
+ ACPI_BITMASK_RT_CLOCK_STATUS | \
+ ACPI_BITMASK_WAKE_STATUS)
+
+#define ACPI_BITMASK_TIMER_ENABLE 0x0001
+#define ACPI_BITMASK_GLOBAL_LOCK_ENABLE 0x0020
+#define ACPI_BITMASK_POWER_BUTTON_ENABLE 0x0100
+#define ACPI_BITMASK_SLEEP_BUTTON_ENABLE 0x0200
+#define ACPI_BITMASK_RT_CLOCK_ENABLE 0x0400
+#define ACPI_BITMASK_PCIEXP_WAKE_DISABLE 0x4000 /* ACPI 3.0 */
+
+#define ACPI_BITMASK_SCI_ENABLE 0x0001
+#define ACPI_BITMASK_BUS_MASTER_RLD 0x0002
+#define ACPI_BITMASK_GLOBAL_LOCK_RELEASE 0x0004
+#define ACPI_BITMASK_SLEEP_TYPE_X 0x1C00
+#define ACPI_BITMASK_SLEEP_ENABLE 0x2000
+
+#define ACPI_BITMASK_ARB_DISABLE 0x0001
+
+/* Raw bit position of each bit_register */
+
+#define ACPI_BITPOSITION_TIMER_STATUS 0x00
+#define ACPI_BITPOSITION_BUS_MASTER_STATUS 0x04
+#define ACPI_BITPOSITION_GLOBAL_LOCK_STATUS 0x05
+#define ACPI_BITPOSITION_POWER_BUTTON_STATUS 0x08
+#define ACPI_BITPOSITION_SLEEP_BUTTON_STATUS 0x09
+#define ACPI_BITPOSITION_RT_CLOCK_STATUS 0x0A
+#define ACPI_BITPOSITION_PCIEXP_WAKE_STATUS 0x0E /* ACPI 3.0 */
+#define ACPI_BITPOSITION_WAKE_STATUS 0x0F
+
+#define ACPI_BITPOSITION_TIMER_ENABLE 0x00
+#define ACPI_BITPOSITION_GLOBAL_LOCK_ENABLE 0x05
+#define ACPI_BITPOSITION_POWER_BUTTON_ENABLE 0x08
+#define ACPI_BITPOSITION_SLEEP_BUTTON_ENABLE 0x09
+#define ACPI_BITPOSITION_RT_CLOCK_ENABLE 0x0A
+#define ACPI_BITPOSITION_PCIEXP_WAKE_DISABLE 0x0E /* ACPI 3.0 */
+
+#define ACPI_BITPOSITION_SCI_ENABLE 0x00
+#define ACPI_BITPOSITION_BUS_MASTER_RLD 0x01
+#define ACPI_BITPOSITION_GLOBAL_LOCK_RELEASE 0x02
+#define ACPI_BITPOSITION_SLEEP_TYPE_X 0x0A
+#define ACPI_BITPOSITION_SLEEP_ENABLE 0x0D
+
+#define ACPI_BITPOSITION_ARB_DISABLE 0x00
+
+/*****************************************************************************
+ *
+ * Resource descriptors
+ *
+ ****************************************************************************/
+
+/* resource_type values */
+
+#define ACPI_ADDRESS_TYPE_MEMORY_RANGE 0
+#define ACPI_ADDRESS_TYPE_IO_RANGE 1
+#define ACPI_ADDRESS_TYPE_BUS_NUMBER_RANGE 2
+
+/* Resource descriptor types and masks */
+
+#define ACPI_RESOURCE_NAME_LARGE 0x80
+#define ACPI_RESOURCE_NAME_SMALL 0x00
+
+#define ACPI_RESOURCE_NAME_SMALL_MASK 0x78 /* Bits 6:3 contain the type */
+#define ACPI_RESOURCE_NAME_SMALL_LENGTH_MASK 0x07 /* Bits 2:0 contain the length */
+#define ACPI_RESOURCE_NAME_LARGE_MASK 0x7F /* Bits 6:0 contain the type */
+
+/*
+ * Small resource descriptor "names" as defined by the ACPI specification.
+ * Note: Bits 2:0 are used for the descriptor length
+ */
+#define ACPI_RESOURCE_NAME_IRQ 0x20
+#define ACPI_RESOURCE_NAME_DMA 0x28
+#define ACPI_RESOURCE_NAME_START_DEPENDENT 0x30
+#define ACPI_RESOURCE_NAME_END_DEPENDENT 0x38
+#define ACPI_RESOURCE_NAME_IO 0x40
+#define ACPI_RESOURCE_NAME_FIXED_IO 0x48
+#define ACPI_RESOURCE_NAME_RESERVED_S1 0x50
+#define ACPI_RESOURCE_NAME_RESERVED_S2 0x58
+#define ACPI_RESOURCE_NAME_RESERVED_S3 0x60
+#define ACPI_RESOURCE_NAME_RESERVED_S4 0x68
+#define ACPI_RESOURCE_NAME_VENDOR_SMALL 0x70
+#define ACPI_RESOURCE_NAME_END_TAG 0x78
+
+/*
+ * Large resource descriptor "names" as defined by the ACPI specification.
+ * Note: includes the Large Descriptor bit in bit[7]
+ */
+#define ACPI_RESOURCE_NAME_MEMORY24 0x81
+#define ACPI_RESOURCE_NAME_GENERIC_REGISTER 0x82
+#define ACPI_RESOURCE_NAME_RESERVED_L1 0x83
+#define ACPI_RESOURCE_NAME_VENDOR_LARGE 0x84
+#define ACPI_RESOURCE_NAME_MEMORY32 0x85
+#define ACPI_RESOURCE_NAME_FIXED_MEMORY32 0x86
+#define ACPI_RESOURCE_NAME_ADDRESS32 0x87
+#define ACPI_RESOURCE_NAME_ADDRESS16 0x88
+#define ACPI_RESOURCE_NAME_EXTENDED_IRQ 0x89
+#define ACPI_RESOURCE_NAME_ADDRESS64 0x8A
+#define ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 0x8B
+#define ACPI_RESOURCE_NAME_LARGE_MAX 0x8B
+
+/*****************************************************************************
+ *
+ * Miscellaneous
+ *
+ ****************************************************************************/
+
+#define ACPI_ASCII_ZERO 0x30
+
+/*****************************************************************************
+ *
+ * Debugger
+ *
+ ****************************************************************************/
+
+struct acpi_db_method_info {
+ acpi_handle thread_gate;
+ char *name;
+ char **args;
+ u32 flags;
+ u32 num_loops;
+ char pathname[128];
+};
+
+struct acpi_integrity_info {
+ u32 nodes;
+ u32 objects;
+};
+
+#define ACPI_DB_REDIRECTABLE_OUTPUT 0x01
+#define ACPI_DB_CONSOLE_OUTPUT 0x02
+#define ACPI_DB_DUPLICATE_OUTPUT 0x03
+
+/*****************************************************************************
+ *
+ * Debug
+ *
+ ****************************************************************************/
+
+/* Entry for a memory allocation (debug only) */
+
+#define ACPI_MEM_MALLOC 0
+#define ACPI_MEM_CALLOC 1
+#define ACPI_MAX_MODULE_NAME 16
+
+#define ACPI_COMMON_DEBUG_MEM_HEADER \
+ struct acpi_debug_mem_block *previous; \
+ struct acpi_debug_mem_block *next; \
+ u32 size; \
+ u32 component; \
+ u32 line; \
+ char module[ACPI_MAX_MODULE_NAME]; \
+ u8 alloc_type;
+
+struct acpi_debug_mem_header {
+ACPI_COMMON_DEBUG_MEM_HEADER};
+
+struct acpi_debug_mem_block {
+ ACPI_COMMON_DEBUG_MEM_HEADER u64 user_space;
+};
+
+#define ACPI_MEM_LIST_GLOBAL 0
+#define ACPI_MEM_LIST_NSNODE 1
+#define ACPI_MEM_LIST_MAX 1
+#define ACPI_NUM_MEM_LISTS 2
+
+struct acpi_memory_list {
+ char *list_name;
+ void *list_head;
+ u16 object_size;
+ u16 max_depth;
+ u16 current_depth;
+ u16 link_offset;
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+
+ /* Statistics for debug memory tracking only */
+
+ u32 total_allocated;
+ u32 total_freed;
+ u32 current_total_size;
+ u32 requests;
+ u32 hits;
+#endif
+};
+
+#endif /* __ACLOCAL_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acmacros.h - C macros for the entire subsystem.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACMACROS_H__
+#define __ACMACROS_H__
+
+/*
+ * Data manipulation macros
+ */
+#define ACPI_LOWORD(l) ((u16)(u32)(l))
+#define ACPI_HIWORD(l) ((u16)((((u32)(l)) >> 16) & 0xFFFF))
+#define ACPI_LOBYTE(l) ((u8)(u16)(l))
+#define ACPI_HIBYTE(l) ((u8)((((u16)(l)) >> 8) & 0xFF))
+
+#define ACPI_SET_BIT(target,bit) ((target) |= (bit))
+#define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit))
+#define ACPI_MIN(a,b) (((a)<(b))?(a):(b))
+
+/* Size calculation */
+
+#define ACPI_ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0]))
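+
+/*
+ * Illustrative usage sketch: with l = 0x12345678, the macros above split
+ * the value as follows:
+ *
+ *     ACPI_LOWORD(l) == 0x5678       ACPI_HIWORD(l) == 0x1234
+ *     ACPI_LOBYTE(l) == 0x78         ACPI_HIBYTE(l) == 0x56
+ *
+ * (ACPI_LOBYTE/ACPI_HIBYTE operate on the low 16 bits of the argument.)
+ * ACPI_ARRAY_LENGTH(x) yields the element count of a true array, e.g.
+ * ACPI_ARRAY_LENGTH(acpi_gbl_mutex_names) == ACPI_NUM_MUTEX; it must not
+ * be applied to a pointer.
+ */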
+
+#if ACPI_MACHINE_WIDTH == 16
+
+/*
+ * For 16-bit addresses, we have to assume that the upper 32 bits
+ * (out of 64) are zero.
+ */
+#define ACPI_LODWORD(l) ((u32)(l))
+#define ACPI_HIDWORD(l) ((u32)(0))
+
+#define ACPI_GET_ADDRESS(a) ((a).lo)
+#define ACPI_STORE_ADDRESS(a,b) {(a).hi=0;(a).lo=(u32)(b);}
+#define ACPI_VALID_ADDRESS(a) ((a).hi | (a).lo)
+
+#else
+#ifdef ACPI_NO_INTEGER64_SUPPORT
+/*
+ * acpi_integer is 32-bits, no 64-bit support on this platform
+ */
+#define ACPI_LODWORD(l) ((u32)(l))
+#define ACPI_HIDWORD(l) ((u32)(0))
+
+#define ACPI_GET_ADDRESS(a) (a)
+#define ACPI_STORE_ADDRESS(a,b) ((a)=(b))
+#define ACPI_VALID_ADDRESS(a) (a)
+
+#else
+
+/*
+ * Full 64-bit address/integer on both 32-bit and 64-bit platforms
+ */
+#define ACPI_LODWORD(l) ((u32)(u64)(l))
+#define ACPI_HIDWORD(l) ((u32)(((*(struct uint64_struct *)(void *)(&l))).hi))
+
+#define ACPI_GET_ADDRESS(a) (a)
+#define ACPI_STORE_ADDRESS(a,b) ((a)=(acpi_physical_address)(b))
+#define ACPI_VALID_ADDRESS(a) (a)
+#endif
+#endif
+
+/*
+ * printf() format helpers
+ */
+
+/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */
+
+#define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i),ACPI_LODWORD(i)
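+
+/*
+ * Illustrative usage sketch: ACPI_FORMAT_UINT64 expands to two
+ * comma-separated 32-bit values, so a 64-bit quantity can be printed even
+ * where no 64-bit format specifier is available:
+ *
+ *     ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Address: %8.8X%8.8X\n",
+ *                       ACPI_FORMAT_UINT64(address)));
+ *
+ * ACPI_DB_INFO is assumed to be one of the debug-level IDs from the
+ * output header; "address" stands in for any u64/acpi_integer value.
+ */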
+
+/*
+ * Extract data using a pointer. Any more than a byte and we
+ * get into potential alignment issues -- see the STORE macros below.
+ * Use with care.
+ */
+#define ACPI_GET8(ptr) *ACPI_CAST_PTR (u8, ptr)
+#define ACPI_GET16(ptr) *ACPI_CAST_PTR (u16, ptr)
+#define ACPI_GET32(ptr) *ACPI_CAST_PTR (u32, ptr)
+#define ACPI_GET64(ptr) *ACPI_CAST_PTR (u64, ptr)
+#define ACPI_SET8(ptr) *ACPI_CAST_PTR (u8, ptr)
+#define ACPI_SET16(ptr) *ACPI_CAST_PTR (u16, ptr)
+#define ACPI_SET32(ptr) *ACPI_CAST_PTR (u32, ptr)
+#define ACPI_SET64(ptr) *ACPI_CAST_PTR (u64, ptr)
+
+/*
+ * Pointer manipulation
+ */
+#define ACPI_CAST_PTR(t, p) ((t *) (acpi_uintptr_t) (p))
+#define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p))
+#define ACPI_ADD_PTR(t,a,b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8,(a)) + (acpi_native_uint)(b)))
+#define ACPI_PTR_DIFF(a,b) (acpi_native_uint) (ACPI_CAST_PTR (u8,(a)) - ACPI_CAST_PTR (u8,(b)))
+
+/* Pointer/Integer type conversions */
+
+#define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void,(void *) NULL,(acpi_native_uint) i)
+#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p,(void *) NULL)
+#define ACPI_OFFSET(d,f) (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f),(void *) NULL)
+
+#if ACPI_MACHINE_WIDTH == 16
+#define ACPI_STORE_POINTER(d,s) ACPI_MOVE_32_TO_32(d,s)
+#define ACPI_PHYSADDR_TO_PTR(i) (void *)(i)
+#define ACPI_PTR_TO_PHYSADDR(i) (u32) ACPI_CAST_PTR (u8,(i))
+#else
+#define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i)
+#define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i)
+#endif
+
+#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
+#define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32,(a)) == *ACPI_CAST_PTR (u32,(b)))
+#else
+#define ACPI_COMPARE_NAME(a,b) (!ACPI_STRNCMP (ACPI_CAST_PTR (char,(a)), ACPI_CAST_PTR (char,(b)), ACPI_NAME_SIZE))
+#endif
+
+/*
+ * Macros for moving data around to/from buffers that are possibly unaligned.
+ * If the hardware supports the transfer of unaligned data, just do the store.
+ * Otherwise, we have to move one byte at a time.
+ */
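+
+/*
+ * Illustrative usage sketch: whichever variant below is selected, callers
+ * pass pointers and the macro hides endianness and alignment, e.g. pulling
+ * a 32-bit value out of a byte-aligned AML stream:
+ *
+ *     u8  *aml = ...;                      possibly unaligned source
+ *     u32  value;
+ *     ACPI_MOVE_32_TO_32(&value, aml);
+ *
+ * On a little-endian machine with unaligned access support this is a plain
+ * 32-bit store; on other configurations it becomes the byte-by-byte copy
+ * defined further down.
+ */
+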
+#ifdef ACPI_BIG_ENDIAN
+/*
+ * Macros for big-endian machines
+ */
+
+/* This macro sets a buffer index, starting from the end of the buffer */
+
+#define ACPI_BUFFER_INDEX(buf_len,buf_offset,byte_gran) ((buf_len) - (((buf_offset)+1) * (byte_gran)))
+
+/* These macros reverse the bytes during the move, converting little-endian to big endian */
+
+ /* Big Endian <== Little Endian */
+ /* Hi...Lo Lo...Hi */
+/* 16-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_16_TO_16(d,s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[1];\
+ (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[0];}
+
+#define ACPI_MOVE_16_TO_32(d,s) {(*(u32 *)(void *)(d))=0;\
+ ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
+ ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}
+
+#define ACPI_MOVE_16_TO_64(d,s) {(*(u64 *)(void *)(d))=0;\
+ ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
+ ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}
+
+/* 32-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_32_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */
+
+#define ACPI_MOVE_32_TO_32(d,s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[3];\
+ (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[2];\
+ (( u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
+ (( u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}
+
+#define ACPI_MOVE_32_TO_64(d,s) {(*(u64 *)(void *)(d))=0;\
+ ((u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\
+ ((u8 *)(void *)(d))[5] = ((u8 *)(void *)(s))[2];\
+ ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
+ ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}
+
+/* 64-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_64_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */
+
+#define ACPI_MOVE_64_TO_32(d,s) ACPI_MOVE_32_TO_32(d,s) /* Truncate to 32 */
+
+#define ACPI_MOVE_64_TO_64(d,s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[7];\
+ (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[6];\
+ (( u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[5];\
+ (( u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[4];\
+ (( u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\
+ (( u8 *)(void *)(d))[5] = ((u8 *)(void *)(s))[2];\
+ (( u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
+ (( u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}
+#else
+/*
+ * Macros for little-endian machines
+ */
+
+/* This macro sets a buffer index, starting from the beginning of the buffer */
+
+#define ACPI_BUFFER_INDEX(buf_len,buf_offset,byte_gran) (buf_offset)
+
+#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
+
+/* The hardware supports unaligned transfers, just do the little-endian move */
+
+#if ACPI_MACHINE_WIDTH == 16
+
+/* No 64-bit integers */
+/* 16-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_16_TO_16(d,s) *(u16 *)(void *)(d) = *(u16 *)(void *)(s)
+#define ACPI_MOVE_16_TO_32(d,s) *(u32 *)(void *)(d) = *(u16 *)(void *)(s)
+#define ACPI_MOVE_16_TO_64(d,s) ACPI_MOVE_16_TO_32(d,s)
+
+/* 32-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_32_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */
+#define ACPI_MOVE_32_TO_32(d,s) *(u32 *)(void *)(d) = *(u32 *)(void *)(s)
+#define ACPI_MOVE_32_TO_64(d,s) ACPI_MOVE_32_TO_32(d,s)
+
+/* 64-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_64_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */
+#define ACPI_MOVE_64_TO_32(d,s) ACPI_MOVE_32_TO_32(d,s) /* Truncate to 32 */
+#define ACPI_MOVE_64_TO_64(d,s) ACPI_MOVE_32_TO_32(d,s)
+
+#else
+/* 16-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_16_TO_16(d,s) *(u16 *)(void *)(d) = *(u16 *)(void *)(s)
+#define ACPI_MOVE_16_TO_32(d,s) *(u32 *)(void *)(d) = *(u16 *)(void *)(s)
+#define ACPI_MOVE_16_TO_64(d,s) *(u64 *)(void *)(d) = *(u16 *)(void *)(s)
+
+/* 32-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_32_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */
+#define ACPI_MOVE_32_TO_32(d,s) *(u32 *)(void *)(d) = *(u32 *)(void *)(s)
+#define ACPI_MOVE_32_TO_64(d,s) *(u64 *)(void *)(d) = *(u32 *)(void *)(s)
+
+/* 64-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_64_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */
+#define ACPI_MOVE_64_TO_32(d,s) ACPI_MOVE_32_TO_32(d,s) /* Truncate to 32 */
+#define ACPI_MOVE_64_TO_64(d,s) *(u64 *)(void *)(d) = *(u64 *)(void *)(s)
+#endif
+
+#else
+/*
+ * The hardware does not support unaligned transfers. We must move the
+ * data one byte at a time. These macros work whether the source or
+ * the destination (or both) is/are unaligned. (Little-endian move)
+ */
+
+/* 16-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_16_TO_16(d,s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[0];\
+ (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[1];}
+
+#define ACPI_MOVE_16_TO_32(d,s) {(*(u32 *)(void *)(d)) = 0; ACPI_MOVE_16_TO_16(d,s);}
+#define ACPI_MOVE_16_TO_64(d,s) {(*(u64 *)(void *)(d)) = 0; ACPI_MOVE_16_TO_16(d,s);}
+
+/* 32-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_32_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */
+
+#define ACPI_MOVE_32_TO_32(d,s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[0];\
+ (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[1];\
+ (( u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[2];\
+ (( u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[3];}
+
+#define ACPI_MOVE_32_TO_64(d,s) {(*(u64 *)(void *)(d)) = 0; ACPI_MOVE_32_TO_32(d,s);}
+
+/* 64-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_64_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */
+#define ACPI_MOVE_64_TO_32(d,s) ACPI_MOVE_32_TO_32(d,s) /* Truncate to 32 */
+#define ACPI_MOVE_64_TO_64(d,s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[0];\
+ (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[1];\
+ (( u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[2];\
+ (( u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[3];\
+ (( u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[4];\
+ (( u8 *)(void *)(d))[5] = ((u8 *)(void *)(s))[5];\
+ (( u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[6];\
+ (( u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[7];}
+#endif
+#endif
+
+/* Macros based on machine integer width */
+
+#if ACPI_MACHINE_WIDTH == 16
+#define ACPI_MOVE_SIZE_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s)
+
+#elif ACPI_MACHINE_WIDTH == 32
+#define ACPI_MOVE_SIZE_TO_16(d,s) ACPI_MOVE_32_TO_16(d,s)
+
+#elif ACPI_MACHINE_WIDTH == 64
+#define ACPI_MOVE_SIZE_TO_16(d,s) ACPI_MOVE_64_TO_16(d,s)
+
+#else
+#error unknown ACPI_MACHINE_WIDTH
+#endif
+
+/*
+ * Fast power-of-two math macros for non-optimized compilers
+ */
+#define _ACPI_DIV(value,power_of2) ((u32) ((value) >> (power_of2)))
+#define _ACPI_MUL(value,power_of2) ((u32) ((value) << (power_of2)))
+#define _ACPI_MOD(value,divisor) ((u32) ((value) & ((divisor) -1)))
+
+#define ACPI_DIV_2(a) _ACPI_DIV(a,1)
+#define ACPI_MUL_2(a) _ACPI_MUL(a,1)
+#define ACPI_MOD_2(a) _ACPI_MOD(a,2)
+
+#define ACPI_DIV_4(a) _ACPI_DIV(a,2)
+#define ACPI_MUL_4(a) _ACPI_MUL(a,2)
+#define ACPI_MOD_4(a) _ACPI_MOD(a,4)
+
+#define ACPI_DIV_8(a) _ACPI_DIV(a,3)
+#define ACPI_MUL_8(a) _ACPI_MUL(a,3)
+#define ACPI_MOD_8(a) _ACPI_MOD(a,8)
+
+#define ACPI_DIV_16(a) _ACPI_DIV(a,4)
+#define ACPI_MUL_16(a) _ACPI_MUL(a,4)
+#define ACPI_MOD_16(a) _ACPI_MOD(a,16)
+
+#define ACPI_DIV_32(a) _ACPI_DIV(a,5)
+#define ACPI_MUL_32(a) _ACPI_MUL(a,5)
+#define ACPI_MOD_32(a) _ACPI_MOD(a,32)
+
+/*
+ * Rounding macros (Power of two boundaries only)
+ */
+#define ACPI_ROUND_DOWN(value,boundary) (((acpi_native_uint)(value)) & \
+ (~(((acpi_native_uint) boundary)-1)))
+
+#define ACPI_ROUND_UP(value,boundary) ((((acpi_native_uint)(value)) + \
+ (((acpi_native_uint) boundary)-1)) & \
+ (~(((acpi_native_uint) boundary)-1)))
+
+/* Note: sizeof(acpi_native_uint) evaluates to either 2, 4, or 8 */
+
+#define ACPI_ROUND_DOWN_TO_32BIT(a) ACPI_ROUND_DOWN(a,4)
+#define ACPI_ROUND_DOWN_TO_64BIT(a) ACPI_ROUND_DOWN(a,8)
+#define ACPI_ROUND_DOWN_TO_NATIVE_WORD(a) ACPI_ROUND_DOWN(a,sizeof(acpi_native_uint))
+
+#define ACPI_ROUND_UP_TO_32BIT(a) ACPI_ROUND_UP(a,4)
+#define ACPI_ROUND_UP_TO_64BIT(a) ACPI_ROUND_UP(a,8)
+#define ACPI_ROUND_UP_TO_NATIVE_WORD(a) ACPI_ROUND_UP(a,sizeof(acpi_native_uint))
+
+#define ACPI_ROUND_BITS_UP_TO_BYTES(a) ACPI_DIV_8((a) + 7)
+#define ACPI_ROUND_BITS_DOWN_TO_BYTES(a) ACPI_DIV_8((a))
+
+#define ACPI_ROUND_UP_TO_1K(a) (((a) + 1023) >> 10)
+
+/* Generic (non-power-of-two) rounding */
+
+#define ACPI_ROUND_UP_TO(value,boundary) (((value) + ((boundary)-1)) / (boundary))
+
+#define ACPI_IS_MISALIGNED(value) (((acpi_native_uint)value) & (sizeof(acpi_native_uint)-1))
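+
+/*
+ * Illustrative usage sketch: the rounding macros work on power-of-two
+ * boundaries only, e.g.:
+ *
+ *     ACPI_ROUND_DOWN(0x1005, 8)        == 0x1000
+ *     ACPI_ROUND_UP(0x1005, 8)          == 0x1008
+ *     ACPI_ROUND_BITS_UP_TO_BYTES(17)   == 3       17 bits need 3 bytes
+ *     ACPI_ROUND_UP_TO_1K(1025)         == 2       bytes -> whole KB
+ *
+ * ACPI_ROUND_UP_TO(value, boundary) is the generic form; note that it
+ * returns the number of boundary-sized units, not a rounded byte count.
+ */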
+
+/*
+ * Bitmask creation
+ * Bit positions start at zero.
+ * MASK_BITS_ABOVE creates a mask starting AT the position and above
+ * MASK_BITS_BELOW creates a mask starting one bit BELOW the position
+ */
+#define ACPI_MASK_BITS_ABOVE(position) (~((ACPI_INTEGER_MAX) << ((u32) (position))))
+#define ACPI_MASK_BITS_BELOW(position) ((ACPI_INTEGER_MAX) << ((u32) (position)))
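+
+/*
+ * Illustrative sketch: assuming ACPI_INTEGER_MAX is the all-ones
+ * acpi_integer, with position == 4 the two macros expand to:
+ *
+ *     ACPI_MASK_BITS_ABOVE(4) == ~(0xFF..FF << 4) == 0x0F
+ *     ACPI_MASK_BITS_BELOW(4) ==  (0xFF..FF << 4) == 0xFF..F0
+ *
+ * so ANDing with MASK_BITS_ABOVE(n) keeps only the low n bits (clearing
+ * bit n and above), while MASK_BITS_BELOW(n) clears the low n bits.
+ */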
+
+#define ACPI_IS_OCTAL_DIGIT(d) (((char)(d) >= '0') && ((char)(d) <= '7'))
+
+/* Bitfields within ACPI registers */
+
+#define ACPI_REGISTER_PREPARE_BITS(val, pos, mask) ((val << pos) & mask)
+#define ACPI_REGISTER_INSERT_VALUE(reg, pos, mask, val) reg = (reg & (~(mask))) | ACPI_REGISTER_PREPARE_BITS(val, pos, mask)
+
+#define ACPI_INSERT_BITS(target, mask, source) target = ((target & (~(mask))) | (source & mask))
+
+/* Generate a UUID */
+
+#define ACPI_INIT_UUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
+ (a) & 0xFF, ((a) >> 8) & 0xFF, ((a) >> 16) & 0xFF, ((a) >> 24) & 0xFF, \
+ (b) & 0xFF, ((b) >> 8) & 0xFF, \
+ (c) & 0xFF, ((c) >> 8) & 0xFF, \
+ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)
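+
+/*
+ * Illustrative usage sketch: ACPI_INIT_UUID emits the 16 UUID bytes in the
+ * order ACPI expects (little-endian a, b, c fields followed by d0..d7), so
+ * it can initialize a byte array directly, e.g. with a made-up UUID:
+ *
+ *     static u8 my_uuid[16] = {
+ *         ACPI_INIT_UUID(0x12345678, 0x9ABC, 0xDEF0,
+ *                        0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08)
+ *     };
+ *
+ * which lays out 78 56 34 12 BC 9A F0 DE 01 02 ... 08 in memory.
+ */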
+
+/*
+ * A struct acpi_namespace_node * can appear in some contexts
+ * where a pointer to a union acpi_operand_object can also
+ * appear. This macro is used to distinguish them.
+ *
+ * The "Descriptor" field is the first field in both structures.
+ */
+#define ACPI_GET_DESCRIPTOR_TYPE(d) (((union acpi_descriptor *)(void *)(d))->common.descriptor_type)
+#define ACPI_SET_DESCRIPTOR_TYPE(d,t) (((union acpi_descriptor *)(void *)(d))->common.descriptor_type = t)
+
+/* Macro to test the object type */
+
+#define ACPI_GET_OBJECT_TYPE(d) (((union acpi_operand_object *)(void *)(d))->common.type)
+
+/* Macro to check the table flags for SINGLE or MULTIPLE tables are allowed */
+
+#define ACPI_IS_SINGLE_TABLE(x) (((x) & 0x01) == ACPI_TABLE_SINGLE ? 1 : 0)
+
+/*
+ * Macros for the master AML opcode table
+ */
+#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
+#define ACPI_OP(name,Pargs,Iargs,obj_type,class,type,flags) {name,(u32)(Pargs),(u32)(Iargs),(u32)(flags),obj_type,class,type}
+#else
+#define ACPI_OP(name,Pargs,Iargs,obj_type,class,type,flags) {(u32)(Pargs),(u32)(Iargs),(u32)(flags),obj_type,class,type}
+#endif
+
+#ifdef ACPI_DISASSEMBLER
+#define ACPI_DISASM_ONLY_MEMBERS(a) a;
+#else
+#define ACPI_DISASM_ONLY_MEMBERS(a)
+#endif
+
+#define ARG_TYPE_WIDTH 5
+#define ARG_1(x) ((u32)(x))
+#define ARG_2(x) ((u32)(x) << (1 * ARG_TYPE_WIDTH))
+#define ARG_3(x) ((u32)(x) << (2 * ARG_TYPE_WIDTH))
+#define ARG_4(x) ((u32)(x) << (3 * ARG_TYPE_WIDTH))
+#define ARG_5(x) ((u32)(x) << (4 * ARG_TYPE_WIDTH))
+#define ARG_6(x) ((u32)(x) << (5 * ARG_TYPE_WIDTH))
+
+#define ARGI_LIST1(a) (ARG_1(a))
+#define ARGI_LIST2(a,b) (ARG_1(b)|ARG_2(a))
+#define ARGI_LIST3(a,b,c) (ARG_1(c)|ARG_2(b)|ARG_3(a))
+#define ARGI_LIST4(a,b,c,d) (ARG_1(d)|ARG_2(c)|ARG_3(b)|ARG_4(a))
+#define ARGI_LIST5(a,b,c,d,e) (ARG_1(e)|ARG_2(d)|ARG_3(c)|ARG_4(b)|ARG_5(a))
+#define ARGI_LIST6(a,b,c,d,e,f) (ARG_1(f)|ARG_2(e)|ARG_3(d)|ARG_4(c)|ARG_5(b)|ARG_6(a))
+
+#define ARGP_LIST1(a) (ARG_1(a))
+#define ARGP_LIST2(a,b) (ARG_1(a)|ARG_2(b))
+#define ARGP_LIST3(a,b,c) (ARG_1(a)|ARG_2(b)|ARG_3(c))
+#define ARGP_LIST4(a,b,c,d) (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d))
+#define ARGP_LIST5(a,b,c,d,e) (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e))
+#define ARGP_LIST6(a,b,c,d,e,f) (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e)|ARG_6(f))
+
+#define GET_CURRENT_ARG_TYPE(list) (list & ((u32) 0x1F))
+#define INCREMENT_ARG_LIST(list) (list >>= ((u32) ARG_TYPE_WIDTH))
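+
+/*
+ * Illustrative usage sketch: ARGP_LIST/ARGI_LIST pack up to six 5-bit
+ * argument-type codes into one u32 (ARG_TYPE_WIDTH == 5). Decoding walks
+ * the packed list from the low bits:
+ *
+ *     u32 arg_list = ARGP_LIST2(ARGP_BYTEDATA, ARGP_TERMARG);
+ *     while (GET_CURRENT_ARG_TYPE(arg_list)) {
+ *         ...handle GET_CURRENT_ARG_TYPE(arg_list)...
+ *         INCREMENT_ARG_LIST(arg_list);
+ *     }
+ *
+ * ARGP_BYTEDATA/ARGP_TERMARG are assumed argument-type codes from the
+ * parser's opcode tables; any nonzero 5-bit codes illustrate the same
+ * packing. Note that ARGI_LIST places its first parameter in the
+ * highest-order slot, so decoding from the low bits visits the interpreter
+ * arguments in reverse declaration order.
+ */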
+
+#if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES)
+/*
+ * Module name is included in both debug and non-debug versions, primarily for
+ * error messages. The __FILE__ macro is not very useful for this, because it
+ * often includes the entire pathname to the module
+ */
+#define ACPI_MODULE_NAME(name) static char ACPI_UNUSED_VAR *_acpi_module_name = name;
+#else
+#define ACPI_MODULE_NAME(name)
+#endif
+
+/*
+ * Ascii error messages can be configured out
+ */
+#ifndef ACPI_NO_ERROR_MESSAGES
+#define AE_INFO _acpi_module_name, __LINE__
+
+/*
+ * Error reporting. The caller's module and line number are inserted by AE_INFO;
+ * the plist contains a set of parens to allow variable-length lists.
+ * These macros are used for both the debug and non-debug versions of the code.
+ */
+#define ACPI_INFO(plist) acpi_ut_info plist
+#define ACPI_WARNING(plist) acpi_ut_warning plist
+#define ACPI_EXCEPTION(plist) acpi_ut_exception plist
+#define ACPI_ERROR(plist) acpi_ut_error plist
+#define ACPI_ERROR_NAMESPACE(s,e) acpi_ns_report_error (AE_INFO, s, e);
+#define ACPI_ERROR_METHOD(s,n,p,e) acpi_ns_report_method_error (AE_INFO, s, n, p, e);
+
+#else
+
+/* No error messages */
+
+#define ACPI_INFO(plist)
+#define ACPI_WARNING(plist)
+#define ACPI_EXCEPTION(plist)
+#define ACPI_ERROR(plist)
+#define ACPI_ERROR_NAMESPACE(s,e)
+#define ACPI_ERROR_METHOD(s,n,p,e)
+#endif
+
+/*
+ * Debug macros that are conditionally compiled
+ */
+#ifdef ACPI_DEBUG_OUTPUT
+
+/*
+ * Common parameters used for debug output functions:
+ * line number, function name, module(file) name, component ID
+ */
+#define ACPI_DEBUG_PARAMETERS __LINE__, ACPI_GET_FUNCTION_NAME, _acpi_module_name, _COMPONENT
+
+/*
+ * Function entry tracing
+ */
+
+/*
+ * If ACPI_GET_FUNCTION_NAME was not defined in the compiler-dependent header,
+ * define it now. This is the case where the compiler does not support
+ * a __FUNCTION__ macro or equivalent. We save the function name on the
+ * local stack.
+ */
+#ifndef ACPI_GET_FUNCTION_NAME
+#define ACPI_GET_FUNCTION_NAME _acpi_function_name
+/*
+ * The Name parameter should be the procedure name as a quoted string.
+ * This is declared as a local string ("MyFunctionName") so that it can
+ * be also used by the function exit macros below.
+ * Note: (const char) is used to be compatible with the debug interfaces
+ * and macros such as __FUNCTION__.
+ */
+#define ACPI_FUNCTION_NAME(name) const char *_acpi_function_name = #name;
+
+#else
+/* Compiler supports __FUNCTION__ (or equivalent) -- Ignore this macro */
+
+#define ACPI_FUNCTION_NAME(name)
+#endif
+
+#define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \
+ acpi_ut_trace(ACPI_DEBUG_PARAMETERS)
+#define ACPI_FUNCTION_TRACE_PTR(a,b) ACPI_FUNCTION_NAME(a) \
+ acpi_ut_trace_ptr(ACPI_DEBUG_PARAMETERS,(void *)b)
+#define ACPI_FUNCTION_TRACE_U32(a,b) ACPI_FUNCTION_NAME(a) \
+ acpi_ut_trace_u32(ACPI_DEBUG_PARAMETERS,(u32)b)
+#define ACPI_FUNCTION_TRACE_STR(a,b) ACPI_FUNCTION_NAME(a) \
+ acpi_ut_trace_str(ACPI_DEBUG_PARAMETERS,(char *)b)
+
+#define ACPI_FUNCTION_ENTRY() acpi_ut_track_stack_ptr()
+
+/*
+ * Function exit tracing.
+ * WARNING: These macros include a return statement. This is usually considered
+ * bad form, but having a separate exit macro is very ugly and difficult to maintain.
+ * One of the FUNCTION_TRACE macros above must be used in conjunction with these macros
+ * so that "_AcpiFunctionName" is defined.
+ *
+ * Note: the DO_WHILE0 macro is used to prevent some compilers from complaining
+ * about these constructs.
+ */
+#ifdef ACPI_USE_DO_WHILE_0
+#define ACPI_DO_WHILE0(a) do a while(0)
+#else
+#define ACPI_DO_WHILE0(a) a
+#endif
+
+#define return_VOID ACPI_DO_WHILE0 ({ \
+ acpi_ut_exit (ACPI_DEBUG_PARAMETERS); \
+ return;})
+/*
+ * There are two versions of most of the return macros. The default version is
+ * safer, since it avoids side-effects by guaranteeing that the argument will
+ * not be evaluated twice.
+ *
+ * A less-safe version of the macros is provided for optional use if the
+ * compiler uses excessive CPU stack (for example, this may happen in the
+ * debug case if code optimization is disabled.)
+ */
+#ifndef ACPI_SIMPLE_RETURN_MACROS
+
+#define return_ACPI_STATUS(s) ACPI_DO_WHILE0 ({ \
+ register acpi_status _s = (s); \
+ acpi_ut_status_exit (ACPI_DEBUG_PARAMETERS, _s); \
+ return (_s); })
+#define return_PTR(s) ACPI_DO_WHILE0 ({ \
+ register void *_s = (void *) (s); \
+ acpi_ut_ptr_exit (ACPI_DEBUG_PARAMETERS, (u8 *) _s); \
+ return (_s); })
+#define return_VALUE(s) ACPI_DO_WHILE0 ({ \
+ register acpi_integer _s = (s); \
+ acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, _s); \
+ return (_s); })
+#define return_UINT8(s) ACPI_DO_WHILE0 ({ \
+ register u8 _s = (u8) (s); \
+ acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, (acpi_integer) _s); \
+ return (_s); })
+#define return_UINT32(s) ACPI_DO_WHILE0 ({ \
+ register u32 _s = (u32) (s); \
+ acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, (acpi_integer) _s); \
+ return (_s); })
+#else /* Use original less-safe macros */
+
+#define return_ACPI_STATUS(s) ACPI_DO_WHILE0 ({ \
+ acpi_ut_status_exit (ACPI_DEBUG_PARAMETERS, (s)); \
+ return((s)); })
+#define return_PTR(s) ACPI_DO_WHILE0 ({ \
+ acpi_ut_ptr_exit (ACPI_DEBUG_PARAMETERS, (u8 *) (s)); \
+ return((s)); })
+#define return_VALUE(s) ACPI_DO_WHILE0 ({ \
+ acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, (acpi_integer) (s)); \
+ return((s)); })
+#define return_UINT8(s) return_VALUE(s)
+#define return_UINT32(s) return_VALUE(s)
+
+#endif /* ACPI_SIMPLE_RETURN_MACROS */
+
+/* Conditional execution */
+
+#define ACPI_DEBUG_EXEC(a) a
+#define ACPI_NORMAL_EXEC(a)
+
+#define ACPI_DEBUG_DEFINE(a) a;
+#define ACPI_DEBUG_ONLY_MEMBERS(a) a;
+#define _VERBOSE_STRUCTURES
+
+/* Stack and buffer dumping */
+
+#define ACPI_DUMP_STACK_ENTRY(a) acpi_ex_dump_operand((a),0)
+#define ACPI_DUMP_OPERANDS(a,b,c,d,e) acpi_ex_dump_operands(a,b,c,d,e,_acpi_module_name,__LINE__)
+
+#define ACPI_DUMP_ENTRY(a,b) acpi_ns_dump_entry (a,b)
+#define ACPI_DUMP_PATHNAME(a,b,c,d) acpi_ns_dump_pathname(a,b,c,d)
+#define ACPI_DUMP_RESOURCE_LIST(a) acpi_rs_dump_resource_list(a)
+#define ACPI_DUMP_BUFFER(a,b) acpi_ut_dump_buffer((u8 *)a,b,DB_BYTE_DISPLAY,_COMPONENT)
+
+/*
+ * Master debug print macros
+ * Print iff:
+ * 1) Debug print for the current component is enabled
+ * 2) Debug error level or trace level for the print statement is enabled
+ */
+#define ACPI_DEBUG_PRINT(plist) acpi_ut_debug_print plist
+#define ACPI_DEBUG_PRINT_RAW(plist) acpi_ut_debug_print_raw plist
+
+#else
+/*
+ * This is the non-debug case -- make everything go away,
+ * leaving no executable debug code!
+ */
+#define ACPI_DEBUG_EXEC(a)
+#define ACPI_NORMAL_EXEC(a) a;
+
+#define ACPI_DEBUG_DEFINE(a)
+#define ACPI_DEBUG_ONLY_MEMBERS(a)
+#define ACPI_FUNCTION_NAME(a)
+#define ACPI_FUNCTION_TRACE(a)
+#define ACPI_FUNCTION_TRACE_PTR(a,b)
+#define ACPI_FUNCTION_TRACE_U32(a,b)
+#define ACPI_FUNCTION_TRACE_STR(a,b)
+#define ACPI_FUNCTION_EXIT
+#define ACPI_FUNCTION_STATUS_EXIT(s)
+#define ACPI_FUNCTION_VALUE_EXIT(s)
+#define ACPI_FUNCTION_ENTRY()
+#define ACPI_DUMP_STACK_ENTRY(a)
+#define ACPI_DUMP_OPERANDS(a,b,c,d,e)
+#define ACPI_DUMP_ENTRY(a,b)
+#define ACPI_DUMP_TABLES(a,b)
+#define ACPI_DUMP_PATHNAME(a,b,c,d)
+#define ACPI_DUMP_RESOURCE_LIST(a)
+#define ACPI_DUMP_BUFFER(a,b)
+#define ACPI_DEBUG_PRINT(pl)
+#define ACPI_DEBUG_PRINT_RAW(pl)
+
+#define return_VOID return
+#define return_ACPI_STATUS(s) return(s)
+#define return_VALUE(s) return(s)
+#define return_UINT8(s) return(s)
+#define return_UINT32(s) return(s)
+#define return_PTR(s) return(s)
+
+#endif
+
+/*
+ * Some code only gets executed when the debugger is built in.
+ * Note that this is entirely independent of whether the
+ * DEBUG_PRINT stuff (set by ACPI_DEBUG_OUTPUT) is on, or not.
+ */
+#ifdef ACPI_DEBUGGER
+#define ACPI_DEBUGGER_EXEC(a) a
+#else
+#define ACPI_DEBUGGER_EXEC(a)
+#endif
+
+/*
+ * For 16-bit code, we want to shrink some things even though
+ * we are using ACPI_DEBUG_OUTPUT to get the debug output
+ */
+#if ACPI_MACHINE_WIDTH == 16
+#undef ACPI_DEBUG_ONLY_MEMBERS
+#undef _VERBOSE_STRUCTURES
+#define ACPI_DEBUG_ONLY_MEMBERS(a)
+#endif
+
+#ifdef ACPI_DEBUG_OUTPUT
+/*
+ * 1) Set name to blanks
+ * 2) Copy the object name
+ */
+#define ACPI_ADD_OBJECT_NAME(a,b) ACPI_MEMSET (a->common.name, ' ', sizeof (a->common.name));\
+ ACPI_STRNCPY (a->common.name, acpi_gbl_ns_type_names[b], sizeof (a->common.name))
+#else
+
+#define ACPI_ADD_OBJECT_NAME(a,b)
+#endif
+
+/*
+ * Memory allocation tracking (DEBUG ONLY)
+ */
+#ifndef ACPI_DBG_TRACK_ALLOCATIONS
+
+/* Memory allocation */
+
+#ifndef ACPI_ALLOCATE
+#define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size)(a),_COMPONENT,_acpi_module_name,__LINE__)
+#endif
+#ifndef ACPI_ALLOCATE_ZEROED
+#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size)(a), _COMPONENT,_acpi_module_name,__LINE__)
+#endif
+#ifndef ACPI_FREE
+#define ACPI_FREE(a) acpi_os_free(a)
+#endif
+#define ACPI_MEM_TRACKING(a)
+
+#else
+
+/* Memory allocation */
+
+#define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size)(a),_COMPONENT,_acpi_module_name,__LINE__)
+#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track((acpi_size)(a), _COMPONENT,_acpi_module_name,__LINE__)
+#define ACPI_FREE(a) acpi_ut_free_and_track(a,_COMPONENT,_acpi_module_name,__LINE__)
+#define ACPI_MEM_TRACKING(a) a
+
+#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
+
+#endif /* __ACMACROS_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acnames.h - Global names and strings
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACNAMES_H__
+#define __ACNAMES_H__
+
+/* Method names - these methods can appear anywhere in the namespace */
+
+#define METHOD_NAME__HID "_HID"
+#define METHOD_NAME__CID "_CID"
+#define METHOD_NAME__UID "_UID"
+#define METHOD_NAME__ADR "_ADR"
+#define METHOD_NAME__INI "_INI"
+#define METHOD_NAME__STA "_STA"
+#define METHOD_NAME__REG "_REG"
+#define METHOD_NAME__SEG "_SEG"
+#define METHOD_NAME__BBN "_BBN"
+#define METHOD_NAME__PRT "_PRT"
+#define METHOD_NAME__CRS "_CRS"
+#define METHOD_NAME__PRS "_PRS"
+#define METHOD_NAME__PRW "_PRW"
+#define METHOD_NAME__SRS "_SRS"
+
+/* Method names - these methods must appear at the namespace root */
+
+#define METHOD_NAME__BFS "\\_BFS"
+#define METHOD_NAME__GTS "\\_GTS"
+#define METHOD_NAME__PTS "\\_PTS"
+#define METHOD_NAME__SST "\\_SI._SST"
+#define METHOD_NAME__WAK "\\_WAK"
+
+/* Definitions of the predefined namespace names */
+
+#define ACPI_UNKNOWN_NAME (u32) 0x3F3F3F3F /* Unknown name is "????" */
+#define ACPI_ROOT_NAME (u32) 0x5F5F5F5C /* Root name is "\___" */
+
+#define ACPI_PREFIX_MIXED (u32) 0x69706341 /* "Acpi" */
+#define ACPI_PREFIX_LOWER (u32) 0x69706361 /* "acpi" */
+
+#define ACPI_NS_ROOT_PATH "\\"
+#define ACPI_NS_SYSTEM_BUS "_SB_"
+
+#endif /* __ACNAMES_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acnamesp.h - Namespace subcomponent prototypes and defines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACNAMESP_H__
+#define __ACNAMESP_H__
+
+/* To search the entire name space, pass this as search_base */
+
+#define ACPI_NS_ALL ((acpi_handle)0)
+
+/*
+ * Elements of acpi_ns_properties are bit significant
+ * and should be one-to-one with values of acpi_object_type
+ */
+#define ACPI_NS_NORMAL 0
+#define ACPI_NS_NEWSCOPE 1 /* a definition of this type opens a name scope */
+#define ACPI_NS_LOCAL 2 /* suppress search of enclosing scopes */
+
+/* Flags for acpi_ns_lookup, acpi_ns_search_and_enter */
+
+#define ACPI_NS_NO_UPSEARCH 0
+#define ACPI_NS_SEARCH_PARENT 0x01
+#define ACPI_NS_DONT_OPEN_SCOPE 0x02
+#define ACPI_NS_NO_PEER_SEARCH 0x04
+#define ACPI_NS_ERROR_IF_FOUND 0x08
+#define ACPI_NS_PREFIX_IS_SCOPE 0x10
+#define ACPI_NS_EXTERNAL 0x20
+
+#define ACPI_NS_WALK_UNLOCK TRUE
+#define ACPI_NS_WALK_NO_UNLOCK FALSE
+
+/*
+ * nsinit - Namespace initialization
+ */
+acpi_status acpi_ns_initialize_objects(void);
+
+acpi_status acpi_ns_initialize_devices(void);
+
+/*
+ * nsload - Namespace loading
+ */
+acpi_status acpi_ns_load_namespace(void);
+
+acpi_status
+acpi_ns_load_table(struct acpi_table_desc *table_desc,
+ struct acpi_namespace_node *node);
+
+/*
+ * nswalk - walk the namespace
+ */
+acpi_status
+acpi_ns_walk_namespace(acpi_object_type type,
+ acpi_handle start_object,
+ u32 max_depth,
+ u8 unlock_before_callback,
+ acpi_walk_callback user_function,
+ void *context, void **return_value);
+
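+/*
+ * Illustrative sketch (not part of the original header): walking every Device
+ * node with the prototype above. The callback shape and the ACPI_TYPE_DEVICE,
+ * ACPI_ROOT_OBJECT and ACPI_UINT32_MAX constants are assumed from actypes.h.
+ *
+ *     static acpi_status count_device(acpi_handle obj, u32 level,
+ *                                     void *context, void **ret)
+ *     {
+ *         (*(u32 *) context)++;
+ *         return AE_OK;
+ *     }
+ *
+ *     u32 count = 0;
+ *     (void) acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ *                                   ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
+ *                                   count_device, &count, NULL);
+ */
+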
+struct acpi_namespace_node *
+acpi_ns_get_next_node(acpi_object_type type,
+ struct acpi_namespace_node *parent,
+ struct acpi_namespace_node *child);
+
+/*
+ * nsparse - table parsing
+ */
+acpi_status
+acpi_ns_parse_table(struct acpi_table_desc *table_desc,
+ struct acpi_namespace_node *scope);
+
+acpi_status
+acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc *table_desc);
+
+/*
+ * nsaccess - Top-level namespace access
+ */
+acpi_status acpi_ns_root_initialize(void);
+
+acpi_status
+acpi_ns_lookup(union acpi_generic_state *scope_info,
+ char *name,
+ acpi_object_type type,
+ acpi_interpreter_mode interpreter_mode,
+ u32 flags,
+ struct acpi_walk_state *walk_state,
+ struct acpi_namespace_node **ret_node);
+
+/*
+ * nsalloc - Named object allocation/deallocation
+ */
+struct acpi_namespace_node *acpi_ns_create_node(u32 name);
+
+void acpi_ns_delete_node(struct acpi_namespace_node *node);
+
+void
+acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_handle);
+
+void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id);
+
+void acpi_ns_detach_object(struct acpi_namespace_node *node);
+
+void acpi_ns_delete_children(struct acpi_namespace_node *parent);
+
+int acpi_ns_compare_names(char *name1, char *name2);
+
+/*
+ * nsdump - Namespace dump/print utilities
+ */
+#ifdef ACPI_FUTURE_USAGE
+void acpi_ns_dump_tables(acpi_handle search_base, u32 max_depth);
+#endif /* ACPI_FUTURE_USAGE */
+
+void acpi_ns_dump_entry(acpi_handle handle, u32 debug_level);
+
+void
+acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component);
+
+void acpi_ns_print_pathname(u32 num_segments, char *pathname);
+
+acpi_status
+acpi_ns_dump_one_object(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+
+#ifdef ACPI_FUTURE_USAGE
+void
+acpi_ns_dump_objects(acpi_object_type type,
+ u8 display_type,
+ u32 max_depth,
+ acpi_owner_id owner_id, acpi_handle start_handle);
+#endif /* ACPI_FUTURE_USAGE */
+
+/*
+ * nseval - Namespace evaluation functions
+ */
+acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info);
+
+/*
+ * nsnames - Name and Scope manipulation
+ */
+u32 acpi_ns_opens_scope(acpi_object_type type);
+
+void
+acpi_ns_build_external_path(struct acpi_namespace_node *node,
+ acpi_size size, char *name_buffer);
+
+char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node);
+
+char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ns_handle_to_pathname(acpi_handle target_handle,
+ struct acpi_buffer *buffer);
+
+u8
+acpi_ns_pattern_match(struct acpi_namespace_node *obj_node, char *search_for);
+
+acpi_status
+acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
+ char *external_pathname,
+ u32 flags, struct acpi_namespace_node **out_node);
+
+acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node);
+
+/*
+ * nsobject - Object management for namespace nodes
+ */
+acpi_status
+acpi_ns_attach_object(struct acpi_namespace_node *node,
+ union acpi_operand_object *object, acpi_object_type type);
+
+union acpi_operand_object *
+acpi_ns_get_attached_object(struct acpi_namespace_node *node);
+
+union acpi_operand_object *
+acpi_ns_get_secondary_object(union acpi_operand_object *obj_desc);
+
+acpi_status
+acpi_ns_attach_data(struct acpi_namespace_node *node,
+ acpi_object_handler handler, void *data);
+
+acpi_status
+acpi_ns_detach_data(struct acpi_namespace_node *node,
+ acpi_object_handler handler);
+
+acpi_status
+acpi_ns_get_attached_data(struct acpi_namespace_node *node,
+ acpi_object_handler handler, void **data);
+
+/*
+ * nssearch - Namespace searching and entry
+ */
+acpi_status
+acpi_ns_search_and_enter(u32 entry_name,
+ struct acpi_walk_state *walk_state,
+ struct acpi_namespace_node *node,
+ acpi_interpreter_mode interpreter_mode,
+ acpi_object_type type,
+ u32 flags, struct acpi_namespace_node **ret_node);
+
+acpi_status
+acpi_ns_search_one_scope(u32 entry_name,
+ struct acpi_namespace_node *node,
+ acpi_object_type type,
+ struct acpi_namespace_node **ret_node);
+
+void
+acpi_ns_install_node(struct acpi_walk_state *walk_state,
+ struct acpi_namespace_node *parent_node,
+ struct acpi_namespace_node *node, acpi_object_type type);
+
+/*
+ * nsutils - Utility functions
+ */
+u8 acpi_ns_valid_root_prefix(char prefix);
+
+acpi_object_type acpi_ns_get_type(struct acpi_namespace_node *node);
+
+u32 acpi_ns_local(acpi_object_type type);
+
+void
+acpi_ns_report_error(char *module_name,
+ u32 line_number,
+ char *internal_name, acpi_status lookup_status);
+
+void
+acpi_ns_report_method_error(char *module_name,
+ u32 line_number,
+ char *message,
+ struct acpi_namespace_node *node,
+ char *path, acpi_status lookup_status);
+
+void acpi_ns_print_node_pathname(struct acpi_namespace_node *node, char *msg);
+
+acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info);
+
+void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info);
+
+acpi_status acpi_ns_internalize_name(char *dotted_name, char **converted_name);
+
+acpi_status
+acpi_ns_externalize_name(u32 internal_name_length,
+ char *internal_name,
+ u32 * converted_name_length, char **converted_name);
+
+struct acpi_namespace_node *acpi_ns_map_handle_to_node(acpi_handle handle);
+
+acpi_handle acpi_ns_convert_entry_to_handle(struct acpi_namespace_node *node);
+
+void acpi_ns_terminate(void);
+
+struct acpi_namespace_node *
+acpi_ns_get_parent_node(struct acpi_namespace_node *node);
+
+struct acpi_namespace_node *
+acpi_ns_get_next_valid_node(struct acpi_namespace_node *node);
+
+#endif /* __ACNAMESP_H__ */
--- /dev/null
+
+/******************************************************************************
+ *
+ * Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef _ACOBJECT_H
+#define _ACOBJECT_H
+
+/* acpisrc:struct_defs -- for acpisrc conversion */
+
+/*
+ * The union acpi_operand_object is used to pass AML operands from the dispatcher
+ * to the interpreter, and to keep track of the various handlers such as
+ * address space handlers and notify handlers. The object is a constant
+ * size in order to allow it to be cached and reused.
+ */
+
+/*******************************************************************************
+ *
+ * Common Descriptors
+ *
+ ******************************************************************************/
+
+/*
+ * Common area for all objects.
+ *
+ * descriptor_type is used to differentiate between internal descriptors, and
+ * must be in the same place across all descriptors
+ *
+ * Note: The descriptor_type and Type fields must appear in the identical
+ * position in both the struct acpi_namespace_node and union acpi_operand_object
+ * structures.
+ */
+#define ACPI_OBJECT_COMMON_HEADER \
+ union acpi_operand_object *next_object; /* Objects linked to parent NS node */\
+ u8 descriptor_type; /* To differentiate various internal objs */\
+ u8 type; /* acpi_object_type */\
+ u16 reference_count; /* For object deletion management */\
+ u8 flags;
+ /*
+ * Note: There are 3 bytes available here before the
+ * next natural alignment boundary (for both 32/64 cases)
+ */
+
+/* Values for Flag byte above */
+
+#define AOPOBJ_AML_CONSTANT 0x01
+#define AOPOBJ_STATIC_POINTER 0x02
+#define AOPOBJ_DATA_VALID 0x04
+#define AOPOBJ_OBJECT_INITIALIZED 0x08
+#define AOPOBJ_SETUP_COMPLETE 0x10
+#define AOPOBJ_SINGLE_DATUM 0x20
+#define AOPOBJ_INVALID 0x40 /* Used if host OS won't allow an op_region address */
+
+/******************************************************************************
+ *
+ * Basic data types
+ *
+ *****************************************************************************/
+
+struct acpi_object_common {
+ACPI_OBJECT_COMMON_HEADER};
+
+struct acpi_object_integer {
+ ACPI_OBJECT_COMMON_HEADER acpi_integer value;
+};
+
+/*
+ * Note: The String and Buffer objects must be identical through the Pointer
+ * and length elements. There is code that depends on this.
+ *
+ * Fields common to both Strings and Buffers
+ */
+#define ACPI_COMMON_BUFFER_INFO(_type) \
+ _type *pointer; \
+ u32 length;
+
+struct acpi_object_string { /* Null terminated, ASCII characters only */
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(char) /* String in AML stream or allocated string */
+};
+
+struct acpi_object_buffer {
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(u8) /* Buffer in AML stream or allocated buffer */
+ u32 aml_length;
+ u8 *aml_start;
+ struct acpi_namespace_node *node; /* Link back to parent node */
+};
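+
+/*
+ * Illustrative note (not part of the original header): because the String and
+ * Buffer structures share the ACPI_COMMON_BUFFER_INFO layout, generic code can
+ * read the data through either member of union acpi_operand_object (defined
+ * below). A minimal sketch, assuming obj_desc points to either object type:
+ *
+ *     u8  *data = obj_desc->buffer.pointer;
+ *     u32  len  = obj_desc->buffer.length;
+ *
+ * Both accesses are valid for ACPI_TYPE_STRING and ACPI_TYPE_BUFFER objects.
+ */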
+
+struct acpi_object_package {
+ ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Link back to parent node */
+ union acpi_operand_object **elements; /* Array of pointers to acpi_objects */
+ u8 *aml_start;
+ u32 aml_length;
+ u32 count; /* # of elements in package */
+};
+
+/******************************************************************************
+ *
+ * Complex data types
+ *
+ *****************************************************************************/
+
+struct acpi_object_event {
+ ACPI_OBJECT_COMMON_HEADER acpi_semaphore os_semaphore; /* Actual OS synchronization object */
+};
+
+struct acpi_object_mutex {
+ ACPI_OBJECT_COMMON_HEADER u8 sync_level; /* 0-15, specified in Mutex() call */
+ u16 acquisition_depth; /* Allow multiple Acquires, same thread */
+ struct acpi_thread_state *owner_thread; /* Current owner of the mutex */
+ acpi_mutex os_mutex; /* Actual OS synchronization object */
+ union acpi_operand_object *prev; /* Link for list of acquired mutexes */
+ union acpi_operand_object *next; /* Link for list of acquired mutexes */
+ struct acpi_namespace_node *node; /* Containing namespace node */
+ u8 original_sync_level; /* Owner's original sync level (0-15) */
+};
+
+struct acpi_object_region {
+ ACPI_OBJECT_COMMON_HEADER u8 space_id;
+ struct acpi_namespace_node *node; /* Containing namespace node */
+ union acpi_operand_object *handler; /* Handler for region access */
+ union acpi_operand_object *next;
+ acpi_physical_address address;
+ u32 length;
+};
+
+struct acpi_object_method {
+ ACPI_OBJECT_COMMON_HEADER u8 method_flags;
+ u8 param_count;
+ u8 sync_level;
+ union acpi_operand_object *mutex;
+ u8 *aml_start;
+ ACPI_INTERNAL_METHOD implementation;
+ u32 aml_length;
+ u8 thread_count;
+ acpi_owner_id owner_id;
+};
+
+/******************************************************************************
+ *
+ * Objects that can be notified. All share a common notify_info area.
+ *
+ *****************************************************************************/
+
+/*
+ * Common fields for objects that support ASL notifications
+ */
+#define ACPI_COMMON_NOTIFY_INFO \
+ union acpi_operand_object *system_notify; /* Handler for system notifies */\
+ union acpi_operand_object *device_notify; /* Handler for driver notifies */\
+ union acpi_operand_object *handler; /* Handler for Address space */
+
+struct acpi_object_notify_common { /* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */
+ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
+
+struct acpi_object_device {
+ ACPI_OBJECT_COMMON_HEADER
+ ACPI_COMMON_NOTIFY_INFO struct acpi_gpe_block_info *gpe_block;
+};
+
+struct acpi_object_power_resource {
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO u32 system_level;
+ u32 resource_order;
+};
+
+struct acpi_object_processor {
+ ACPI_OBJECT_COMMON_HEADER u8 proc_id;
+ u8 length;
+ ACPI_COMMON_NOTIFY_INFO acpi_io_address address;
+};
+
+struct acpi_object_thermal_zone {
+ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
+
+/******************************************************************************
+ *
+ * Fields. All share a common header/info field.
+ *
+ *****************************************************************************/
+
+/*
+ * Common bitfield for the field objects
+ * "Field Datum" -- a datum from the actual field object
+ * "Buffer Datum" -- a datum from a user buffer, read from or to be written to the field
+ */
+#define ACPI_COMMON_FIELD_INFO \
+ u8 field_flags; /* Access, update, and lock bits */\
+ u8 attribute; /* From access_as keyword */\
+ u8 access_byte_width; /* Read/Write size in bytes */\
+ struct acpi_namespace_node *node; /* Link back to parent node */\
+ u32 bit_length; /* Length of field in bits */\
+ u32 base_byte_offset; /* Byte offset within containing object */\
+ u32 value; /* Value to store into the Bank or Index register */\
+ u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\
+ u8 access_bit_width; /* Read/Write size in bits (8-64) */
+
+struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */
+};
+
+struct acpi_object_region_field {
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Containing op_region object */
+};
+
+struct acpi_object_bank_field {
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Containing op_region object */
+ union acpi_operand_object *bank_obj; /* bank_select Register object */
+};
+
+struct acpi_object_index_field {
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO
+ /*
+ * No "RegionObj" pointer needed since the Index and Data registers
+ * are each field definitions unto themselves.
+ */
+ union acpi_operand_object *index_obj; /* Index register */
+ union acpi_operand_object *data_obj; /* Data register */
+};
+
+/* The buffer_field is different in that it is part of a Buffer, not an op_region */
+
+struct acpi_object_buffer_field {
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *buffer_obj; /* Containing Buffer object */
+};
+
+/******************************************************************************
+ *
+ * Objects for handlers
+ *
+ *****************************************************************************/
+
+struct acpi_object_notify_handler {
+ ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Parent device */
+ acpi_notify_handler handler;
+ void *context;
+};
+
+struct acpi_object_addr_handler {
+ ACPI_OBJECT_COMMON_HEADER u8 space_id;
+ u8 handler_flags;
+ acpi_adr_space_handler handler;
+ struct acpi_namespace_node *node; /* Parent device */
+ void *context;
+ acpi_adr_space_setup setup;
+ union acpi_operand_object *region_list; /* regions using this handler */
+ union acpi_operand_object *next;
+};
+
+/* Flags for address handler (handler_flags) */
+
+#define ACPI_ADDR_HANDLER_DEFAULT_INSTALLED 0x01
+
+/******************************************************************************
+ *
+ * Special internal objects
+ *
+ *****************************************************************************/
+
+/*
+ * The Reference object type is used for these opcodes:
+ * Arg[0-6], Local[0-7], index_op, name_op, zero_op, one_op, ones_op, debug_op
+ */
+struct acpi_object_reference {
+ ACPI_OBJECT_COMMON_HEADER u8 target_type; /* Used for index_op */
+ u16 opcode;
+ void *object; /* name_op=>HANDLE to obj, index_op=>union acpi_operand_object */
+ struct acpi_namespace_node *node;
+ union acpi_operand_object **where;
+ u32 offset; /* Used for arg_op, local_op, and index_op */
+};
+
+/*
+ * Extra object is used as additional storage for types that
+ * have AML code in their declarations (term_args) that must be
+ * evaluated at run time.
+ *
+ * Currently: Region and field_unit types
+ */
+struct acpi_object_extra {
+ ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */
+ void *region_context; /* Region-specific data */
+ u8 *aml_start;
+ u32 aml_length;
+};
+
+/* Additional data that can be attached to namespace nodes */
+
+struct acpi_object_data {
+ ACPI_OBJECT_COMMON_HEADER acpi_object_handler handler;
+ void *pointer;
+};
+
+/* Structure used when objects are cached for reuse */
+
+struct acpi_object_cache_list {
+ ACPI_OBJECT_COMMON_HEADER union acpi_operand_object *next; /* Link for object cache and internal lists */
+};
+
+/******************************************************************************
+ *
+ * union acpi_operand_object Descriptor - a giant union of all of the above
+ *
+ *****************************************************************************/
+
+union acpi_operand_object {
+ struct acpi_object_common common;
+ struct acpi_object_integer integer;
+ struct acpi_object_string string;
+ struct acpi_object_buffer buffer;
+ struct acpi_object_package package;
+ struct acpi_object_event event;
+ struct acpi_object_method method;
+ struct acpi_object_mutex mutex;
+ struct acpi_object_region region;
+ struct acpi_object_notify_common common_notify;
+ struct acpi_object_device device;
+ struct acpi_object_power_resource power_resource;
+ struct acpi_object_processor processor;
+ struct acpi_object_thermal_zone thermal_zone;
+ struct acpi_object_field_common common_field;
+ struct acpi_object_region_field field;
+ struct acpi_object_buffer_field buffer_field;
+ struct acpi_object_bank_field bank_field;
+ struct acpi_object_index_field index_field;
+ struct acpi_object_notify_handler notify;
+ struct acpi_object_addr_handler address_space;
+ struct acpi_object_reference reference;
+ struct acpi_object_extra extra;
+ struct acpi_object_data data;
+ struct acpi_object_cache_list cache;
+};
+
+/******************************************************************************
+ *
+ * union acpi_descriptor - objects that share a common descriptor identifier
+ *
+ *****************************************************************************/
+
+/* Object descriptor types */
+
+#define ACPI_DESC_TYPE_CACHED 0x01 /* Used only when object is cached */
+#define ACPI_DESC_TYPE_STATE 0x02
+#define ACPI_DESC_TYPE_STATE_UPDATE 0x03
+#define ACPI_DESC_TYPE_STATE_PACKAGE 0x04
+#define ACPI_DESC_TYPE_STATE_CONTROL 0x05
+#define ACPI_DESC_TYPE_STATE_RPSCOPE 0x06
+#define ACPI_DESC_TYPE_STATE_PSCOPE 0x07
+#define ACPI_DESC_TYPE_STATE_WSCOPE 0x08
+#define ACPI_DESC_TYPE_STATE_RESULT 0x09
+#define ACPI_DESC_TYPE_STATE_NOTIFY 0x0A
+#define ACPI_DESC_TYPE_STATE_THREAD 0x0B
+#define ACPI_DESC_TYPE_WALK 0x0C
+#define ACPI_DESC_TYPE_PARSER 0x0D
+#define ACPI_DESC_TYPE_OPERAND 0x0E
+#define ACPI_DESC_TYPE_NAMED 0x0F
+#define ACPI_DESC_TYPE_MAX 0x0F
+
+struct acpi_common_descriptor {
+ void *common_pointer;
+ u8 descriptor_type; /* To differentiate various internal objs */
+};
+
+union acpi_descriptor {
+ struct acpi_common_descriptor common;
+ union acpi_operand_object object;
+ struct acpi_namespace_node node;
+ union acpi_parse_object op;
+};
+
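+/*
+ * Illustrative sketch (not part of the original header): because
+ * descriptor_type sits at the same offset in every descriptor, core code can
+ * identify an arbitrary pointer through union acpi_descriptor before casting:
+ *
+ *     union acpi_descriptor *desc = (union acpi_descriptor *) pointer;
+ *
+ *     switch (desc->common.descriptor_type) {
+ *     case ACPI_DESC_TYPE_NAMED:
+ *         handle_node(&desc->node);
+ *         break;
+ *     case ACPI_DESC_TYPE_OPERAND:
+ *         handle_object(&desc->object);
+ *         break;
+ *     default:
+ *         break;
+ *     }
+ *
+ * handle_node() and handle_object() are hypothetical helpers shown only for
+ * illustration.
+ */
+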
+#endif /* _ACOBJECT_H */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acopcode.h - AML opcode information for the AML parser and interpreter
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACOPCODE_H__
+#define __ACOPCODE_H__
+
+#define MAX_EXTENDED_OPCODE 0x88
+#define NUM_EXTENDED_OPCODE (MAX_EXTENDED_OPCODE + 1)
+#define MAX_INTERNAL_OPCODE
+#define NUM_INTERNAL_OPCODE (MAX_INTERNAL_OPCODE + 1)
+
+/* Used for non-assigned opcodes */
+
+#define _UNK 0x6B
+
+/*
+ * Reserved ASCII characters. Do not use any of these for
+ * internal opcodes, since they are used to differentiate
+ * name strings from AML opcodes
+ */
+#define _ASC 0x6C
+#define _NAM 0x6C
+#define _PFX 0x6D
+
+/*
+ * All AML opcodes and the parse-time arguments for each. Used by the AML
+ * parser. Each list is compressed into a 32-bit number and stored in the
+ * master opcode table (in psopcode.c).
+ */
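+
+/*
+ * Illustrative note (not part of the original header): each ARGP_LISTn entry
+ * packs its argument types into a single 32-bit value that the parser then
+ * consumes one small fixed-width field at a time. A conceptual sketch,
+ * assuming the decoding helpers used by the ACPICA core (GET_CURRENT_ARG_TYPE
+ * and INCREMENT_ARG_LIST from aclocal.h) and an op_info->parse_args field:
+ *
+ *     u32 arg_types = op_info->parse_args;
+ *
+ *     while (GET_CURRENT_ARG_TYPE(arg_types)) {
+ *         ... parse one argument of the current type ...
+ *         INCREMENT_ARG_LIST(arg_types);
+ *     }
+ */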
+#define ARGP_ACCESSFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING)
+#define ARGP_ACQUIRE_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_WORDDATA)
+#define ARGP_ADD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_ALIAS_OP ARGP_LIST2 (ARGP_NAMESTRING, ARGP_NAME)
+#define ARGP_ARG0 ARG_NONE
+#define ARGP_ARG1 ARG_NONE
+#define ARGP_ARG2 ARG_NONE
+#define ARGP_ARG3 ARG_NONE
+#define ARGP_ARG4 ARG_NONE
+#define ARGP_ARG5 ARG_NONE
+#define ARGP_ARG6 ARG_NONE
+#define ARGP_BANK_FIELD_OP ARGP_LIST6 (ARGP_PKGLENGTH, ARGP_NAMESTRING, ARGP_NAMESTRING,ARGP_TERMARG, ARGP_BYTEDATA, ARGP_FIELDLIST)
+#define ARGP_BIT_AND_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_BIT_NAND_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_BIT_NOR_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_BIT_NOT_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_BIT_OR_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_BIT_XOR_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_BREAK_OP ARG_NONE
+#define ARGP_BREAK_POINT_OP ARG_NONE
+#define ARGP_BUFFER_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_TERMARG, ARGP_BYTELIST)
+#define ARGP_BYTE_OP ARGP_LIST1 (ARGP_BYTEDATA)
+#define ARGP_BYTELIST_OP ARGP_LIST1 (ARGP_NAMESTRING)
+#define ARGP_CONCAT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_CONCAT_RES_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_COND_REF_OF_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_SUPERNAME)
+#define ARGP_CONTINUE_OP ARG_NONE
+#define ARGP_COPY_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_SIMPLENAME)
+#define ARGP_CREATE_BIT_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
+#define ARGP_CREATE_BYTE_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
+#define ARGP_CREATE_DWORD_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
+#define ARGP_CREATE_FIELD_OP ARGP_LIST4 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
+#define ARGP_CREATE_QWORD_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
+#define ARGP_CREATE_WORD_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
+#define ARGP_DATA_REGION_OP ARGP_LIST4 (ARGP_NAME, ARGP_TERMARG, ARGP_TERMARG, ARGP_TERMARG)
+#define ARGP_DEBUG_OP ARG_NONE
+#define ARGP_DECREMENT_OP ARGP_LIST1 (ARGP_SUPERNAME)
+#define ARGP_DEREF_OF_OP ARGP_LIST1 (ARGP_TERMARG)
+#define ARGP_DEVICE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_OBJLIST)
+#define ARGP_DIVIDE_OP ARGP_LIST4 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET, ARGP_TARGET)
+#define ARGP_DWORD_OP ARGP_LIST1 (ARGP_DWORDDATA)
+#define ARGP_ELSE_OP ARGP_LIST2 (ARGP_PKGLENGTH, ARGP_TERMLIST)
+#define ARGP_EVENT_OP ARGP_LIST1 (ARGP_NAME)
+#define ARGP_FATAL_OP ARGP_LIST3 (ARGP_BYTEDATA, ARGP_DWORDDATA, ARGP_TERMARG)
+#define ARGP_FIELD_OP ARGP_LIST4 (ARGP_PKGLENGTH, ARGP_NAMESTRING, ARGP_BYTEDATA, ARGP_FIELDLIST)
+#define ARGP_FIND_SET_LEFT_BIT_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_FIND_SET_RIGHT_BIT_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_FROM_BCD_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_IF_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_TERMARG, ARGP_TERMLIST)
+#define ARGP_INCREMENT_OP ARGP_LIST1 (ARGP_SUPERNAME)
+#define ARGP_INDEX_FIELD_OP ARGP_LIST5 (ARGP_PKGLENGTH, ARGP_NAMESTRING, ARGP_NAMESTRING,ARGP_BYTEDATA, ARGP_FIELDLIST)
+#define ARGP_INDEX_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_LAND_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TERMARG)
+#define ARGP_LEQUAL_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TERMARG)
+#define ARGP_LGREATER_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TERMARG)
+#define ARGP_LGREATEREQUAL_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TERMARG)
+#define ARGP_LLESS_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TERMARG)
+#define ARGP_LLESSEQUAL_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TERMARG)
+#define ARGP_LNOT_OP ARGP_LIST1 (ARGP_TERMARG)
+#define ARGP_LNOTEQUAL_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TERMARG)
+#define ARGP_LOAD_OP ARGP_LIST2 (ARGP_NAMESTRING, ARGP_SUPERNAME)
+#define ARGP_LOAD_TABLE_OP ARGP_LIST6 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TERMARG, ARGP_TERMARG, ARGP_TERMARG, ARGP_TERMARG)
+#define ARGP_LOCAL0 ARG_NONE
+#define ARGP_LOCAL1 ARG_NONE
+#define ARGP_LOCAL2 ARG_NONE
+#define ARGP_LOCAL3 ARG_NONE
+#define ARGP_LOCAL4 ARG_NONE
+#define ARGP_LOCAL5 ARG_NONE
+#define ARGP_LOCAL6 ARG_NONE
+#define ARGP_LOCAL7 ARG_NONE
+#define ARGP_LOR_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TERMARG)
+#define ARGP_MATCH_OP ARGP_LIST6 (ARGP_TERMARG, ARGP_BYTEDATA, ARGP_TERMARG, ARGP_BYTEDATA, ARGP_TERMARG, ARGP_TERMARG)
+#define ARGP_METHOD_OP ARGP_LIST4 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_BYTEDATA, ARGP_TERMLIST)
+#define ARGP_METHODCALL_OP ARGP_LIST1 (ARGP_NAMESTRING)
+#define ARGP_MID_OP ARGP_LIST4 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_MOD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_MULTIPLY_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_MUTEX_OP ARGP_LIST2 (ARGP_NAME, ARGP_BYTEDATA)
+#define ARGP_NAME_OP ARGP_LIST2 (ARGP_NAME, ARGP_DATAOBJ)
+#define ARGP_NAMEDFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING)
+#define ARGP_NAMEPATH_OP ARGP_LIST1 (ARGP_NAMESTRING)
+#define ARGP_NOOP_OP ARG_NONE
+#define ARGP_NOTIFY_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_TERMARG)
+#define ARGP_ONE_OP ARG_NONE
+#define ARGP_ONES_OP ARG_NONE
+#define ARGP_PACKAGE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_BYTEDATA, ARGP_DATAOBJLIST)
+#define ARGP_POWER_RES_OP ARGP_LIST5 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_BYTEDATA, ARGP_WORDDATA, ARGP_OBJLIST)
+#define ARGP_PROCESSOR_OP ARGP_LIST6 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_BYTEDATA, ARGP_DWORDDATA, ARGP_BYTEDATA, ARGP_OBJLIST)
+#define ARGP_QWORD_OP ARGP_LIST1 (ARGP_QWORDDATA)
+#define ARGP_REF_OF_OP ARGP_LIST1 (ARGP_SUPERNAME)
+#define ARGP_REGION_OP ARGP_LIST4 (ARGP_NAME, ARGP_BYTEDATA, ARGP_TERMARG, ARGP_TERMARG)
+#define ARGP_RELEASE_OP ARGP_LIST1 (ARGP_SUPERNAME)
+#define ARGP_RESERVEDFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING)
+#define ARGP_RESET_OP ARGP_LIST1 (ARGP_SUPERNAME)
+#define ARGP_RETURN_OP ARGP_LIST1 (ARGP_TERMARG)
+#define ARGP_REVISION_OP ARG_NONE
+#define ARGP_SCOPE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_TERMLIST)
+#define ARGP_SHIFT_LEFT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_SHIFT_RIGHT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_SIGNAL_OP ARGP_LIST1 (ARGP_SUPERNAME)
+#define ARGP_SIZE_OF_OP ARGP_LIST1 (ARGP_SUPERNAME)
+#define ARGP_SLEEP_OP ARGP_LIST1 (ARGP_TERMARG)
+#define ARGP_STALL_OP ARGP_LIST1 (ARGP_TERMARG)
+#define ARGP_STATICSTRING_OP ARGP_LIST1 (ARGP_NAMESTRING)
+#define ARGP_STORE_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_SUPERNAME)
+#define ARGP_STRING_OP ARGP_LIST1 (ARGP_CHARLIST)
+#define ARGP_SUBTRACT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_THERMAL_ZONE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_OBJLIST)
+#define ARGP_TIMER_OP ARG_NONE
+#define ARGP_TO_BCD_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_TO_BUFFER_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_TO_DEC_STR_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_TO_HEX_STR_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_TO_INTEGER_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_TO_STRING_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
+#define ARGP_TYPE_OP ARGP_LIST1 (ARGP_SUPERNAME)
+#define ARGP_UNLOAD_OP ARGP_LIST1 (ARGP_SUPERNAME)
+#define ARGP_VAR_PACKAGE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_TERMARG, ARGP_DATAOBJLIST)
+#define ARGP_WAIT_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_TERMARG)
+#define ARGP_WHILE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_TERMARG, ARGP_TERMLIST)
+#define ARGP_WORD_OP ARGP_LIST1 (ARGP_WORDDATA)
+#define ARGP_ZERO_OP ARG_NONE
+
+/*
+ * All AML opcodes and the runtime arguments for each. Used by the AML
+ * interpreter. Each list is compressed into a 32-bit number and stored
+ * in the master opcode table (in psopcode.c).
+ *
+ * (Used by the prep_operands procedure and the ASL Compiler)
+ */
+#define ARGI_ACCESSFIELD_OP ARGI_INVALID_OPCODE
+#define ARGI_ACQUIRE_OP ARGI_LIST2 (ARGI_MUTEX, ARGI_INTEGER)
+#define ARGI_ADD_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_ALIAS_OP ARGI_INVALID_OPCODE
+#define ARGI_ARG0 ARG_NONE
+#define ARGI_ARG1 ARG_NONE
+#define ARGI_ARG2 ARG_NONE
+#define ARGI_ARG3 ARG_NONE
+#define ARGI_ARG4 ARG_NONE
+#define ARGI_ARG5 ARG_NONE
+#define ARGI_ARG6 ARG_NONE
+#define ARGI_BANK_FIELD_OP ARGI_INVALID_OPCODE
+#define ARGI_BIT_AND_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_BIT_NAND_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_BIT_NOR_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_BIT_NOT_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_BIT_OR_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_BIT_XOR_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_BREAK_OP ARG_NONE
+#define ARGI_BREAK_POINT_OP ARG_NONE
+#define ARGI_BUFFER_OP ARGI_LIST1 (ARGI_INTEGER)
+#define ARGI_BYTE_OP ARGI_INVALID_OPCODE
+#define ARGI_BYTELIST_OP ARGI_INVALID_OPCODE
+#define ARGI_CONCAT_OP ARGI_LIST3 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA, ARGI_TARGETREF)
+#define ARGI_CONCAT_RES_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_BUFFER, ARGI_TARGETREF)
+#define ARGI_COND_REF_OF_OP ARGI_LIST2 (ARGI_OBJECT_REF, ARGI_TARGETREF)
+#define ARGI_CONTINUE_OP ARGI_INVALID_OPCODE
+#define ARGI_COPY_OP ARGI_LIST2 (ARGI_ANYTYPE, ARGI_SIMPLE_TARGET)
+#define ARGI_CREATE_BIT_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE)
+#define ARGI_CREATE_BYTE_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE)
+#define ARGI_CREATE_DWORD_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE)
+#define ARGI_CREATE_FIELD_OP ARGI_LIST4 (ARGI_BUFFER, ARGI_INTEGER, ARGI_INTEGER, ARGI_REFERENCE)
+#define ARGI_CREATE_QWORD_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE)
+#define ARGI_CREATE_WORD_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE)
+#define ARGI_DATA_REGION_OP ARGI_LIST3 (ARGI_STRING, ARGI_STRING, ARGI_STRING)
+#define ARGI_DEBUG_OP ARG_NONE
+#define ARGI_DECREMENT_OP ARGI_LIST1 (ARGI_INTEGER_REF)
+#define ARGI_DEREF_OF_OP ARGI_LIST1 (ARGI_REF_OR_STRING)
+#define ARGI_DEVICE_OP ARGI_INVALID_OPCODE
+#define ARGI_DIVIDE_OP ARGI_LIST4 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF, ARGI_TARGETREF)
+#define ARGI_DWORD_OP ARGI_INVALID_OPCODE
+#define ARGI_ELSE_OP ARGI_INVALID_OPCODE
+#define ARGI_EVENT_OP ARGI_INVALID_OPCODE
+#define ARGI_FATAL_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_INTEGER)
+#define ARGI_FIELD_OP ARGI_INVALID_OPCODE
+#define ARGI_FIND_SET_LEFT_BIT_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_FIND_SET_RIGHT_BIT_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_FROM_BCD_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_FIXED_TARGET)
+#define ARGI_IF_OP ARGI_INVALID_OPCODE
+#define ARGI_INCREMENT_OP ARGI_LIST1 (ARGI_INTEGER_REF)
+#define ARGI_INDEX_FIELD_OP ARGI_INVALID_OPCODE
+#define ARGI_INDEX_OP ARGI_LIST3 (ARGI_COMPLEXOBJ, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_LAND_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_INTEGER)
+#define ARGI_LEQUAL_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA)
+#define ARGI_LGREATER_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA)
+#define ARGI_LGREATEREQUAL_OP ARGI_INVALID_OPCODE
+#define ARGI_LLESS_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA)
+#define ARGI_LLESSEQUAL_OP ARGI_INVALID_OPCODE
+#define ARGI_LNOT_OP ARGI_LIST1 (ARGI_INTEGER)
+#define ARGI_LNOTEQUAL_OP ARGI_INVALID_OPCODE
+#define ARGI_LOAD_OP ARGI_LIST2 (ARGI_REGION_OR_FIELD,ARGI_TARGETREF)
+#define ARGI_LOAD_TABLE_OP ARGI_LIST6 (ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_ANYTYPE)
+#define ARGI_LOCAL0 ARG_NONE
+#define ARGI_LOCAL1 ARG_NONE
+#define ARGI_LOCAL2 ARG_NONE
+#define ARGI_LOCAL3 ARG_NONE
+#define ARGI_LOCAL4 ARG_NONE
+#define ARGI_LOCAL5 ARG_NONE
+#define ARGI_LOCAL6 ARG_NONE
+#define ARGI_LOCAL7 ARG_NONE
+#define ARGI_LOR_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_INTEGER)
+#define ARGI_MATCH_OP ARGI_LIST6 (ARGI_PACKAGE, ARGI_INTEGER, ARGI_COMPUTEDATA, ARGI_INTEGER,ARGI_COMPUTEDATA,ARGI_INTEGER)
+#define ARGI_METHOD_OP ARGI_INVALID_OPCODE
+#define ARGI_METHODCALL_OP ARGI_INVALID_OPCODE
+#define ARGI_MID_OP ARGI_LIST4 (ARGI_BUFFER_OR_STRING,ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_MOD_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_MULTIPLY_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_MUTEX_OP ARGI_INVALID_OPCODE
+#define ARGI_NAME_OP ARGI_INVALID_OPCODE
+#define ARGI_NAMEDFIELD_OP ARGI_INVALID_OPCODE
+#define ARGI_NAMEPATH_OP ARGI_INVALID_OPCODE
+#define ARGI_NOOP_OP ARG_NONE
+#define ARGI_NOTIFY_OP ARGI_LIST2 (ARGI_DEVICE_REF, ARGI_INTEGER)
+#define ARGI_ONE_OP ARG_NONE
+#define ARGI_ONES_OP ARG_NONE
+#define ARGI_PACKAGE_OP ARGI_LIST1 (ARGI_INTEGER)
+#define ARGI_POWER_RES_OP ARGI_INVALID_OPCODE
+#define ARGI_PROCESSOR_OP ARGI_INVALID_OPCODE
+#define ARGI_QWORD_OP ARGI_INVALID_OPCODE
+#define ARGI_REF_OF_OP ARGI_LIST1 (ARGI_OBJECT_REF)
+#define ARGI_REGION_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_INTEGER)
+#define ARGI_RELEASE_OP ARGI_LIST1 (ARGI_MUTEX)
+#define ARGI_RESERVEDFIELD_OP ARGI_INVALID_OPCODE
+#define ARGI_RESET_OP ARGI_LIST1 (ARGI_EVENT)
+#define ARGI_RETURN_OP ARGI_INVALID_OPCODE
+#define ARGI_REVISION_OP ARG_NONE
+#define ARGI_SCOPE_OP ARGI_INVALID_OPCODE
+#define ARGI_SHIFT_LEFT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_SHIFT_RIGHT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_SIGNAL_OP ARGI_LIST1 (ARGI_EVENT)
+#define ARGI_SIZE_OF_OP ARGI_LIST1 (ARGI_DATAOBJECT)
+#define ARGI_SLEEP_OP ARGI_LIST1 (ARGI_INTEGER)
+#define ARGI_STALL_OP ARGI_LIST1 (ARGI_INTEGER)
+#define ARGI_STATICSTRING_OP ARGI_INVALID_OPCODE
+#define ARGI_STORE_OP ARGI_LIST2 (ARGI_DATAREFOBJ, ARGI_TARGETREF)
+#define ARGI_STRING_OP ARGI_INVALID_OPCODE
+#define ARGI_SUBTRACT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
+#define ARGI_THERMAL_ZONE_OP ARGI_INVALID_OPCODE
+#define ARGI_TIMER_OP ARG_NONE
+#define ARGI_TO_BCD_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_FIXED_TARGET)
+#define ARGI_TO_BUFFER_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_FIXED_TARGET)
+#define ARGI_TO_DEC_STR_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_FIXED_TARGET)
+#define ARGI_TO_HEX_STR_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_FIXED_TARGET)
+#define ARGI_TO_INTEGER_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_FIXED_TARGET)
+#define ARGI_TO_STRING_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_FIXED_TARGET)
+#define ARGI_TYPE_OP ARGI_LIST1 (ARGI_ANYTYPE)
+#define ARGI_UNLOAD_OP ARGI_LIST1 (ARGI_DDBHANDLE)
+#define ARGI_VAR_PACKAGE_OP ARGI_LIST1 (ARGI_INTEGER)
+#define ARGI_WAIT_OP ARGI_LIST2 (ARGI_EVENT, ARGI_INTEGER)
+#define ARGI_WHILE_OP ARGI_INVALID_OPCODE
+#define ARGI_WORD_OP ARGI_INVALID_OPCODE
+#define ARGI_ZERO_OP ARG_NONE
+
+#endif /* __ACOPCODE_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acoutput.h -- debug output
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACOUTPUT_H__
+#define __ACOUTPUT_H__
+
+/*
+ * Debug levels and component IDs. These are used to control the
+ * granularity of the output of the DEBUG_PRINT macro -- on a per-
+ * component basis and a per-exception-type basis.
+ */
+
+/* Component IDs are used in the global "DebugLayer" */
+
+#define ACPI_UTILITIES 0x00000001
+#define ACPI_HARDWARE 0x00000002
+#define ACPI_EVENTS 0x00000004
+#define ACPI_TABLES 0x00000008
+#define ACPI_NAMESPACE 0x00000010
+#define ACPI_PARSER 0x00000020
+#define ACPI_DISPATCHER 0x00000040
+#define ACPI_EXECUTER 0x00000080
+#define ACPI_RESOURCES 0x00000100
+#define ACPI_CA_DEBUGGER 0x00000200
+#define ACPI_OS_SERVICES 0x00000400
+#define ACPI_CA_DISASSEMBLER 0x00000800
+
+/* Component IDs for ACPI tools and utilities */
+
+#define ACPI_COMPILER 0x00001000
+#define ACPI_TOOLS 0x00002000
+
+#define ACPI_ALL_COMPONENTS 0x00003FFF
+#define ACPI_COMPONENT_DEFAULT (ACPI_ALL_COMPONENTS)
+
+/* Component IDs reserved for ACPI drivers */
+
+#define ACPI_ALL_DRIVERS 0xFFFF0000
+
+/*
+ * Raw debug output levels, do not use these in the DEBUG_PRINT macros
+ */
+#define ACPI_LV_ERROR 0x00000001
+#define ACPI_LV_WARN 0x00000002
+#define ACPI_LV_INIT 0x00000004
+#define ACPI_LV_DEBUG_OBJECT 0x00000008
+#define ACPI_LV_INFO 0x00000010
+#define ACPI_LV_ALL_EXCEPTIONS 0x0000001F
+
+/* Trace verbosity level 1 [Standard Trace Level] */
+
+#define ACPI_LV_INIT_NAMES 0x00000020
+#define ACPI_LV_PARSE 0x00000040
+#define ACPI_LV_LOAD 0x00000080
+#define ACPI_LV_DISPATCH 0x00000100
+#define ACPI_LV_EXEC 0x00000200
+#define ACPI_LV_NAMES 0x00000400
+#define ACPI_LV_OPREGION 0x00000800
+#define ACPI_LV_BFIELD 0x00001000
+#define ACPI_LV_TABLES 0x00002000
+#define ACPI_LV_VALUES 0x00004000
+#define ACPI_LV_OBJECTS 0x00008000
+#define ACPI_LV_RESOURCES 0x00010000
+#define ACPI_LV_USER_REQUESTS 0x00020000
+#define ACPI_LV_PACKAGE 0x00040000
+#define ACPI_LV_VERBOSITY1 (0x0007FF40 | ACPI_LV_ALL_EXCEPTIONS)
+
+/* Trace verbosity level 2 [Function tracing and memory allocation] */
+
+#define ACPI_LV_ALLOCATIONS 0x00100000
+#define ACPI_LV_FUNCTIONS 0x00200000
+#define ACPI_LV_OPTIMIZATIONS 0x00400000
+#define ACPI_LV_VERBOSITY2 (0x00700000 | ACPI_LV_VERBOSITY1)
+#define ACPI_LV_ALL ACPI_LV_VERBOSITY2
+
+/* Trace verbosity level 3 [Threading, I/O, and Interrupts] */
+
+#define ACPI_LV_MUTEX 0x01000000
+#define ACPI_LV_THREADS 0x02000000
+#define ACPI_LV_IO 0x04000000
+#define ACPI_LV_INTERRUPTS 0x08000000
+#define ACPI_LV_VERBOSITY3 (0x0F000000 | ACPI_LV_VERBOSITY2)
+
+/* Exceptionally verbose output -- also used in the global "DebugLevel" */
+
+#define ACPI_LV_AML_DISASSEMBLE 0x10000000
+#define ACPI_LV_VERBOSE_INFO 0x20000000
+#define ACPI_LV_FULL_TABLES 0x40000000
+#define ACPI_LV_EVENTS 0x80000000
+
+#define ACPI_LV_VERBOSE 0xF0000000
+
+/*
+ * Debug level macros that are used in the DEBUG_PRINT macros
+ */
+#define ACPI_DEBUG_LEVEL(dl) (u32) dl,ACPI_DEBUG_PARAMETERS
+
+/* Exception level -- used in the global "DebugLevel" */
+
+#define ACPI_DB_INIT ACPI_DEBUG_LEVEL (ACPI_LV_INIT)
+#define ACPI_DB_DEBUG_OBJECT ACPI_DEBUG_LEVEL (ACPI_LV_DEBUG_OBJECT)
+#define ACPI_DB_INFO ACPI_DEBUG_LEVEL (ACPI_LV_INFO)
+#define ACPI_DB_ALL_EXCEPTIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALL_EXCEPTIONS)
+
+/*
+ * These two levels are essentially obsolete, all instances in the
+ * ACPICA core code have been replaced by ACPI_ERROR and ACPI_WARNING
+ * (Kept here because some drivers may still use them)
+ */
+#define ACPI_DB_ERROR ACPI_DEBUG_LEVEL (ACPI_LV_ERROR)
+#define ACPI_DB_WARN ACPI_DEBUG_LEVEL (ACPI_LV_WARN)
+
+/* Trace level -- also used in the global "DebugLevel" */
+
+#define ACPI_DB_INIT_NAMES ACPI_DEBUG_LEVEL (ACPI_LV_INIT_NAMES)
+#define ACPI_DB_THREADS ACPI_DEBUG_LEVEL (ACPI_LV_THREADS)
+#define ACPI_DB_PARSE ACPI_DEBUG_LEVEL (ACPI_LV_PARSE)
+#define ACPI_DB_DISPATCH ACPI_DEBUG_LEVEL (ACPI_LV_DISPATCH)
+#define ACPI_DB_LOAD ACPI_DEBUG_LEVEL (ACPI_LV_LOAD)
+#define ACPI_DB_EXEC ACPI_DEBUG_LEVEL (ACPI_LV_EXEC)
+#define ACPI_DB_NAMES ACPI_DEBUG_LEVEL (ACPI_LV_NAMES)
+#define ACPI_DB_OPREGION ACPI_DEBUG_LEVEL (ACPI_LV_OPREGION)
+#define ACPI_DB_BFIELD ACPI_DEBUG_LEVEL (ACPI_LV_BFIELD)
+#define ACPI_DB_TABLES ACPI_DEBUG_LEVEL (ACPI_LV_TABLES)
+#define ACPI_DB_FUNCTIONS ACPI_DEBUG_LEVEL (ACPI_LV_FUNCTIONS)
+#define ACPI_DB_OPTIMIZATIONS ACPI_DEBUG_LEVEL (ACPI_LV_OPTIMIZATIONS)
+#define ACPI_DB_VALUES ACPI_DEBUG_LEVEL (ACPI_LV_VALUES)
+#define ACPI_DB_OBJECTS ACPI_DEBUG_LEVEL (ACPI_LV_OBJECTS)
+#define ACPI_DB_ALLOCATIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALLOCATIONS)
+#define ACPI_DB_RESOURCES ACPI_DEBUG_LEVEL (ACPI_LV_RESOURCES)
+#define ACPI_DB_IO ACPI_DEBUG_LEVEL (ACPI_LV_IO)
+#define ACPI_DB_INTERRUPTS ACPI_DEBUG_LEVEL (ACPI_LV_INTERRUPTS)
+#define ACPI_DB_USER_REQUESTS ACPI_DEBUG_LEVEL (ACPI_LV_USER_REQUESTS)
+#define ACPI_DB_PACKAGE ACPI_DEBUG_LEVEL (ACPI_LV_PACKAGE)
+#define ACPI_DB_MUTEX ACPI_DEBUG_LEVEL (ACPI_LV_MUTEX)
+
+#define ACPI_DB_ALL ACPI_DEBUG_LEVEL (ACPI_LV_ALL)
+
+/* Default debug_level masks for debug and normal builds */
+
+#define ACPI_DEBUG_DEFAULT (ACPI_LV_INIT | ACPI_LV_WARN | ACPI_LV_ERROR | ACPI_LV_DEBUG_OBJECT)
+#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_WARN | ACPI_LV_ERROR | ACPI_LV_DEBUG_OBJECT)
+#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
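+
+/*
+ * Illustrative use (not part of the original header): the DEBUG_PRINT output
+ * described above is gated by two globals that these masks feed. A sketch,
+ * assuming the usual ACPICA global names acpi_dbg_layer and acpi_dbg_level
+ * (declared in acglobal.h):
+ *
+ *     acpi_dbg_layer = ACPI_NAMESPACE | ACPI_PARSER;
+ *     acpi_dbg_level = ACPI_LV_VERBOSITY1;
+ *
+ * A statement is emitted only when its level bit is set in acpi_dbg_level and
+ * its component bit is set in acpi_dbg_layer.
+ */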
+
+#endif /* __ACOUTPUT_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Module Name: acparser.h - AML Parser subcomponent prototypes and defines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACPARSER_H__
+#define __ACPARSER_H__
+
+#define OP_HAS_RETURN_VALUE 1
+
+/* Variable number of arguments. This field must be 32 bits */
+
+#define ACPI_VAR_ARGS ACPI_UINT32_MAX
+
+#define ACPI_PARSE_DELETE_TREE 0x0001
+#define ACPI_PARSE_NO_TREE_DELETE 0x0000
+#define ACPI_PARSE_TREE_MASK 0x0001
+
+#define ACPI_PARSE_LOAD_PASS1 0x0010
+#define ACPI_PARSE_LOAD_PASS2 0x0020
+#define ACPI_PARSE_EXECUTE 0x0030
+#define ACPI_PARSE_MODE_MASK 0x0030
+
+#define ACPI_PARSE_DEFERRED_OP 0x0100
+#define ACPI_PARSE_DISASSEMBLE 0x0200
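+
+/*
+ * Illustrative note (not part of the original header): these flags are OR-ed
+ * together to select a parse mode. For example, a full parse that executes a
+ * control method and frees the tree as it goes would plausibly set:
+ *
+ *     walk_state->parse_flags = ACPI_PARSE_EXECUTE | ACPI_PARSE_DELETE_TREE;
+ *
+ * walk_state is assumed to be a struct acpi_walk_state (aclocal.h) with a
+ * parse_flags member; see acpi_ps_execute_method() below.
+ */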
+
+/******************************************************************************
+ *
+ * Parser interfaces
+ *
+ *****************************************************************************/
+
+/*
+ * psxface - Parser external interfaces
+ */
+acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info);
+
+/*
+ * psargs - Parse AML opcode arguments
+ */
+u8 *acpi_ps_get_next_package_end(struct acpi_parse_state *parser_state);
+
+char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state);
+
+void
+acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
+ u32 arg_type, union acpi_parse_object *arg);
+
+acpi_status
+acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
+ struct acpi_parse_state *parser_state,
+ union acpi_parse_object *arg, u8 method_call);
+
+acpi_status
+acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
+ struct acpi_parse_state *parser_state,
+ u32 arg_type, union acpi_parse_object **return_arg);
+
+/*
+ * psfind
+ */
+union acpi_parse_object *acpi_ps_find_name(union acpi_parse_object *scope,
+ u32 name, u32 opcode);
+
+union acpi_parse_object *acpi_ps_get_parent(union acpi_parse_object *op);
+
+/*
+ * psopcode - AML Opcode information
+ */
+const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode);
+
+char *acpi_ps_get_opcode_name(u16 opcode);
+
+/*
+ * psparse - top level parsing routines
+ */
+acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state);
+
+u32 acpi_ps_get_opcode_size(u32 opcode);
+
+u16 acpi_ps_peek_opcode(struct acpi_parse_state *state);
+
+acpi_status
+acpi_ps_complete_this_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op);
+
+acpi_status
+acpi_ps_next_parse_state(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ acpi_status callback_status);
+
+/*
+ * psloop - main parse loop
+ */
+acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state);
+
+/*
+ * psscope - Scope stack management routines
+ */
+acpi_status
+acpi_ps_init_scope(struct acpi_parse_state *parser_state,
+ union acpi_parse_object *root);
+
+union acpi_parse_object *
+acpi_ps_get_parent_scope(struct acpi_parse_state *state);
+
+u8 acpi_ps_has_completed_scope(struct acpi_parse_state *parser_state);
+
+void
+acpi_ps_pop_scope(struct acpi_parse_state *parser_state,
+ union acpi_parse_object **op,
+ u32 * arg_list, u32 * arg_count);
+
+acpi_status
+acpi_ps_push_scope(struct acpi_parse_state *parser_state,
+ union acpi_parse_object *op,
+ u32 remaining_args, u32 arg_count);
+
+void acpi_ps_cleanup_scope(struct acpi_parse_state *state);
+
+/*
+ * pstree - parse tree manipulation routines
+ */
+void
+acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg);
+
+union acpi_parse_object *acpi_ps_find(union acpi_parse_object *scope,
+ char *path, u16 opcode, u32 create);
+
+union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn);
+
+#ifdef ACPI_FUTURE_USAGE
+union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin,
+ union acpi_parse_object *op);
+#endif /* ACPI_FUTURE_USAGE */
+
+/*
+ * pswalk - parse tree walk routines
+ */
+acpi_status
+acpi_ps_walk_parsed_aml(union acpi_parse_object *start_op,
+ union acpi_parse_object *end_op,
+ union acpi_operand_object *mth_desc,
+ struct acpi_namespace_node *start_node,
+ union acpi_operand_object **params,
+ union acpi_operand_object **caller_return_desc,
+ acpi_owner_id owner_id,
+ acpi_parse_downwards descending_callback,
+ acpi_parse_upwards ascending_callback);
+
+acpi_status
+acpi_ps_get_next_walk_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ acpi_parse_upwards ascending_callback);
+
+acpi_status acpi_ps_delete_completed_op(struct acpi_walk_state *walk_state);
+
+void acpi_ps_delete_parse_tree(union acpi_parse_object *root);
+
+/*
+ * psutils - parser utilities
+ */
+union acpi_parse_object *acpi_ps_create_scope_op(void);
+
+void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode);
+
+union acpi_parse_object *acpi_ps_alloc_op(u16 opcode);
+
+void acpi_ps_free_op(union acpi_parse_object *op);
+
+u8 acpi_ps_is_leading_char(u32 c);
+
+u8 acpi_ps_is_prefix_char(u32 c);
+
+#ifdef ACPI_FUTURE_USAGE
+u32 acpi_ps_get_name(union acpi_parse_object *op);
+#endif /* ACPI_FUTURE_USAGE */
+
+void acpi_ps_set_name(union acpi_parse_object *op, u32 name);
+
+/*
+ * psdump - display parser tree
+ */
+u32
+acpi_ps_sprint_path(char *buffer_start,
+ u32 buffer_size, union acpi_parse_object *op);
+
+u32
+acpi_ps_sprint_op(char *buffer_start,
+ u32 buffer_size, union acpi_parse_object *op);
+
+void acpi_ps_show(union acpi_parse_object *op);
+
+#endif /* __ACPARSER_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acpi.h - Master include file, Publics and external data.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACPI_H__
+#define __ACPI_H__
+
+/*
+ * Common includes for all ACPI driver files
+ * We put them here because we don't want to duplicate them
+ * in the rest of the source code again and again.
+ */
+#include "acnames.h" /* Global ACPI names and strings */
+#include "acconfig.h" /* Configuration constants */
+#include "platform/acenv.h" /* Target environment specific items */
+#include "actypes.h" /* Fundamental common data types */
+#include "acexcep.h" /* ACPI exception codes */
+#include "acmacros.h" /* C macros */
+#include "actbl.h" /* ACPI table definitions */
+#include "aclocal.h" /* Internal data types */
+#include "acoutput.h" /* Error output and Debug macros */
+#include "acpiosxf.h" /* Interfaces to the ACPI-to-OS layer */
+#include "acpixf.h" /* ACPI core subsystem external interfaces */
+#include "acobject.h" /* ACPI internal object */
+#include "acstruct.h" /* Common structures */
+#include "acglobal.h" /* All global variables */
+#include "achware.h" /* Hardware defines and interfaces */
+#include "acutils.h" /* Utility interfaces */
+
+#endif /* __ACPI_H__ */
--- /dev/null
+/*
+ * acpi_bus.h - ACPI Bus Driver ($Revision: 22 $)
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#ifndef __ACPI_BUS_H__
+#define __ACPI_BUS_H__
+
+#include <linux/device.h>
+
+#include <acpi/acpi.h>
+
+#define PREFIX "ACPI: "
+
+/* TBD: Make dynamic */
+#define ACPI_MAX_HANDLES 10
+struct acpi_handle_list {
+ u32 count;
+ acpi_handle handles[ACPI_MAX_HANDLES];
+};
+
+/* acpi_utils.h */
+acpi_status
+acpi_extract_package(union acpi_object *package,
+ struct acpi_buffer *format, struct acpi_buffer *buffer);
+acpi_status
+acpi_evaluate_integer(acpi_handle handle,
+ acpi_string pathname,
+ struct acpi_object_list *arguments, unsigned long *data);
+acpi_status
+acpi_evaluate_reference(acpi_handle handle,
+ acpi_string pathname,
+ struct acpi_object_list *arguments,
+ struct acpi_handle_list *list);
+
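+/*
+ * Illustrative sketch (not part of the original header): one way a caller
+ * might use acpi_evaluate_integer() as declared above. The helper name and
+ * the choice of the "_ADR" method are hypothetical; a NULL argument list is
+ * passed because _ADR takes no arguments.
+ */
+static inline int acpi_bus_example_read_adr(acpi_handle handle,
+                                            unsigned long *adr)
+{
+        acpi_status status;
+
+        status = acpi_evaluate_integer(handle, "_ADR", NULL, adr);
+        if (ACPI_FAILURE(status))
+                return -1;      /* evaluation failed or object was not an integer */
+
+        return 0;
+}
+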
+#ifdef CONFIG_ACPI
+
+#include <linux/proc_fs.h>
+
+#define ACPI_BUS_FILE_ROOT "acpi"
+extern struct proc_dir_entry *acpi_root_dir;
+extern struct fadt_descriptor acpi_fadt;
+
+enum acpi_bus_removal_type {
+ ACPI_BUS_REMOVAL_NORMAL = 0,
+ ACPI_BUS_REMOVAL_EJECT,
+ ACPI_BUS_REMOVAL_SUPRISE,
+ ACPI_BUS_REMOVAL_TYPE_COUNT
+};
+
+enum acpi_bus_device_type {
+ ACPI_BUS_TYPE_DEVICE = 0,
+ ACPI_BUS_TYPE_POWER,
+ ACPI_BUS_TYPE_PROCESSOR,
+ ACPI_BUS_TYPE_THERMAL,
+ ACPI_BUS_TYPE_SYSTEM,
+ ACPI_BUS_TYPE_POWER_BUTTON,
+ ACPI_BUS_TYPE_SLEEP_BUTTON,
+ ACPI_BUS_DEVICE_TYPE_COUNT
+};
+
+struct acpi_driver;
+struct acpi_device;
+
+/*
+ * ACPI Driver
+ * -----------
+ */
+
+typedef int (*acpi_op_add) (struct acpi_device * device);
+typedef int (*acpi_op_remove) (struct acpi_device * device, int type);
+typedef int (*acpi_op_lock) (struct acpi_device * device, int type);
+typedef int (*acpi_op_start) (struct acpi_device * device);
+typedef int (*acpi_op_stop) (struct acpi_device * device, int type);
+typedef int (*acpi_op_suspend) (struct acpi_device * device, int state);
+typedef int (*acpi_op_resume) (struct acpi_device * device, int state);
+typedef int (*acpi_op_scan) (struct acpi_device * device);
+typedef int (*acpi_op_bind) (struct acpi_device * device);
+typedef int (*acpi_op_unbind) (struct acpi_device * device);
+typedef int (*acpi_op_match) (struct acpi_device * device,
+ struct acpi_driver * driver);
+
+struct acpi_bus_ops {
+ u32 acpi_op_add:1;
+ u32 acpi_op_remove:1;
+ u32 acpi_op_lock:1;
+ u32 acpi_op_start:1;
+ u32 acpi_op_stop:1;
+ u32 acpi_op_suspend:1;
+ u32 acpi_op_resume:1;
+ u32 acpi_op_scan:1;
+ u32 acpi_op_bind:1;
+ u32 acpi_op_unbind:1;
+ u32 acpi_op_match:1;
+ u32 reserved:21;
+};
+
+struct acpi_device_ops {
+ acpi_op_add add;
+ acpi_op_remove remove;
+ acpi_op_lock lock;
+ acpi_op_start start;
+ acpi_op_stop stop;
+ acpi_op_suspend suspend;
+ acpi_op_resume resume;
+ acpi_op_scan scan;
+ acpi_op_bind bind;
+ acpi_op_unbind unbind;
+ acpi_op_match match;
+};
+
+struct acpi_driver {
+ struct list_head node;
+ char name[80];
+ char class[80];
+ atomic_t references;
+ char *ids; /* Supported Hardware IDs */
+ struct acpi_device_ops ops;
+};
+
+/*
+ * ACPI Device
+ * -----------
+ */
+
+/* Status (_STA) */
+
+struct acpi_device_status {
+ u32 present:1;
+ u32 enabled:1;
+ u32 show_in_ui:1;
+ u32 functional:1;
+ u32 battery_present:1;
+ u32 reserved:27;
+};
+
+/* Flags */
+
+struct acpi_device_flags {
+ u32 dynamic_status:1;
+ u32 hardware_id:1;
+ u32 compatible_ids:1;
+ u32 bus_address:1;
+ u32 unique_id:1;
+ u32 removable:1;
+ u32 ejectable:1;
+ u32 lockable:1;
+ u32 suprise_removal_ok:1;
+ u32 power_manageable:1;
+ u32 performance_manageable:1;
+ u32 wake_capable:1; /* Wakeup(_PRW) supported? */
+ u32 force_power_state:1;
+ u32 reserved:19;
+};
+
+/* File System */
+
+struct acpi_device_dir {
+ struct proc_dir_entry *entry;
+};
+
+#define acpi_device_dir(d) ((d)->dir.entry)
+
+/* Plug and Play */
+
+typedef char acpi_bus_id[5];
+typedef unsigned long acpi_bus_address;
+typedef char acpi_hardware_id[9];
+typedef char acpi_unique_id[9];
+typedef char acpi_device_name[40];
+typedef char acpi_device_class[20];
+
+struct acpi_device_pnp {
+ acpi_bus_id bus_id; /* Object name */
+ acpi_bus_address bus_address; /* _ADR */
+ acpi_hardware_id hardware_id; /* _HID */
+ struct acpi_compatible_id_list *cid_list; /* _CIDs */
+ acpi_unique_id unique_id; /* _UID */
+ acpi_device_name device_name; /* Driver-determined */
+ acpi_device_class device_class; /* " */
+};
+
+#define acpi_device_bid(d) ((d)->pnp.bus_id)
+#define acpi_device_adr(d) ((d)->pnp.bus_address)
+#define acpi_device_hid(d) ((d)->pnp.hardware_id)
+#define acpi_device_uid(d) ((d)->pnp.unique_id)
+#define acpi_device_name(d) ((d)->pnp.device_name)
+#define acpi_device_class(d) ((d)->pnp.device_class)
+
+/* Power Management */
+
+struct acpi_device_power_flags {
+ u32 explicit_get:1; /* _PSC present? */
+ u32 power_resources:1; /* Power resources */
+ u32 inrush_current:1; /* Serialize Dx->D0 */
+ u32 power_removed:1; /* Optimize Dx->D0 */
+ u32 reserved:28;
+};
+
+struct acpi_device_power_state {
+ struct {
+ u8 valid:1;
+ u8 explicit_set:1; /* _PSx present? */
+ u8 reserved:6;
+ } flags;
+ int power; /* % Power (compared to D0) */
+ int latency; /* Dx->D0 time (microseconds) */
+ struct acpi_handle_list resources; /* Power resources referenced */
+};
+
+struct acpi_device_power {
+ int state; /* Current state */
+ struct acpi_device_power_flags flags;
+ struct acpi_device_power_state states[4]; /* Power states (D0-D3) */
+};
+
+/* Performance Management */
+
+struct acpi_device_perf_flags {
+ u8 reserved:8;
+};
+
+struct acpi_device_perf_state {
+ struct {
+ u8 valid:1;
+ u8 reserved:7;
+ } flags;
+ u8 power; /* % Power (compared to P0) */
+ u8 performance; /* % Performance ( " ) */
+ int latency; /* Px->P0 time (microseconds) */
+};
+
+struct acpi_device_perf {
+ int state;
+ struct acpi_device_perf_flags flags;
+ int state_count;
+ struct acpi_device_perf_state *states;
+};
+
+/* Wakeup Management */
+struct acpi_device_wakeup_flags {
+ u8 valid:1; /* Can successfully enable wakeup? */
+ u8 run_wake:1; /* Run-Wake GPE devices */
+};
+
+struct acpi_device_wakeup_state {
+ u8 enabled:1;
+ u8 active:1;
+};
+
+struct acpi_device_wakeup {
+ acpi_handle gpe_device;
+ acpi_integer gpe_number;
+ acpi_integer sleep_state;
+ struct acpi_handle_list resources;
+ struct acpi_device_wakeup_state state;
+ struct acpi_device_wakeup_flags flags;
+};
+
+/* Device */
+
+struct acpi_device {
+ acpi_handle handle;
+ struct acpi_device *parent;
+ struct list_head children;
+ struct list_head node;
+ struct list_head wakeup_list;
+ struct list_head g_list;
+ struct acpi_device_status status;
+ struct acpi_device_flags flags;
+ struct acpi_device_pnp pnp;
+ struct acpi_device_power power;
+ struct acpi_device_wakeup wakeup;
+ struct acpi_device_perf performance;
+ struct acpi_device_dir dir;
+ struct acpi_device_ops ops;
+ struct acpi_driver *driver;
+ void *driver_data;
+ struct kobject kobj;
+ struct device dev;
+};
+
+#define acpi_driver_data(d) ((d)->driver_data)
+
+/*
+ * Events
+ * ------
+ */
+
+struct acpi_bus_event {
+ struct list_head node;
+ acpi_device_class device_class;
+ acpi_bus_id bus_id;
+ u32 type;
+ u32 data;
+};
+
+extern struct subsystem acpi_subsys;
+
+/*
+ * External Functions
+ */
+
+int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
+void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context);
+int acpi_bus_get_status(struct acpi_device *device);
+int acpi_bus_get_power(acpi_handle handle, int *state);
+int acpi_bus_set_power(acpi_handle handle, int state);
+int acpi_bus_generate_event(struct acpi_device *device, u8 type, int data);
+int acpi_bus_receive_event(struct acpi_bus_event *event);
+int acpi_bus_register_driver(struct acpi_driver *driver);
+void acpi_bus_unregister_driver(struct acpi_driver *driver);
+int acpi_bus_add(struct acpi_device **child, struct acpi_device *parent,
+ acpi_handle handle, int type);
+int acpi_bus_trim(struct acpi_device *start, int rmdevice);
+int acpi_bus_start(struct acpi_device *device);
+acpi_status acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd);
+int acpi_match_ids(struct acpi_device *device, char *ids);
+int acpi_create_dir(struct acpi_device *);
+void acpi_remove_dir(struct acpi_device *);
+
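+/*
+ * Illustrative sketch (not from the original header): a minimal driver built
+ * from the structures and prototypes above. The hardware ID "XYZ0001", the
+ * function names, and the init routine are hypothetical, and the block is
+ * guarded out since it is an example rather than part of the interface.
+ */
+#if 0  /* example only */
+static int example_add(struct acpi_device *device)
+{
+        /* Private state would be attached here via acpi_driver_data() */
+        acpi_driver_data(device) = NULL;
+        return 0;
+}
+
+static int example_remove(struct acpi_device *device, int type)
+{
+        return 0;
+}
+
+static struct acpi_driver example_driver = {
+        .name  = "example",
+        .class = "example",
+        .ids   = "XYZ0001",             /* hypothetical _HID to match */
+        .ops   = {
+                .add    = example_add,
+                .remove = example_remove,
+        },
+};
+
+static int example_init(void)
+{
+        /* Register with the ACPI bus; see acpi_bus_register_driver() above */
+        return acpi_bus_register_driver(&example_driver);
+}
+#endif
+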
+/*
+ * Bind physical devices with ACPI devices
+ */
+#include <linux/device.h>
+struct acpi_bus_type {
+ struct list_head list;
+ struct bus_type *bus;
+ /* For general devices under the bus */
+ int (*find_device) (struct device *, acpi_handle *);
+ /* For bridges, such as PCI root bridge, IDE controller */
+ int (*find_bridge) (struct device *, acpi_handle *);
+};
+int register_acpi_bus_type(struct acpi_bus_type *);
+int unregister_acpi_bus_type(struct acpi_bus_type *);
+struct device *acpi_get_physical_device(acpi_handle);
+/* helper */
+acpi_handle acpi_get_child(acpi_handle, acpi_integer);
+acpi_handle acpi_get_pci_rootbridge_handle(unsigned int, unsigned int);
+#define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)((dev)->firmware_data))
+
+#endif /* CONFIG_ACPI */
+
+#endif /*__ACPI_BUS_H__*/
--- /dev/null
+/*
+ * acpi_drivers.h ($Revision: 31 $)
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#ifndef __ACPI_DRIVERS_H__
+#define __ACPI_DRIVERS_H__
+
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+
+#define ACPI_MAX_STRING 80
+
+#define ACPI_BUS_COMPONENT 0x00010000
+#define ACPI_SYSTEM_COMPONENT 0x02000000
+
+/* _HID definitions */
+
+#define ACPI_POWER_HID "ACPI_PWR"
+#define ACPI_PROCESSOR_HID "ACPI_CPU"
+#define ACPI_SYSTEM_HID "ACPI_SYS"
+#define ACPI_THERMAL_HID "ACPI_THM"
+#define ACPI_BUTTON_HID_POWERF "ACPI_FPB"
+#define ACPI_BUTTON_HID_SLEEPF "ACPI_FSB"
+
+/* --------------------------------------------------------------------------
+ PCI
+ -------------------------------------------------------------------------- */
+
+#define ACPI_PCI_COMPONENT 0x00400000
+
+/* ACPI PCI Interrupt Link (pci_link.c) */
+
+int acpi_irq_penalty_init(void);
+int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
+ int *polarity, char **name);
+int acpi_pci_link_free_irq(acpi_handle handle);
+
+/* ACPI PCI Interrupt Routing (pci_irq.c) */
+
+int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus);
+void acpi_pci_irq_del_prt(int segment, int bus);
+
+/* ACPI PCI Device Binding (pci_bind.c) */
+
+struct pci_bus;
+
+acpi_status acpi_get_pci_id(acpi_handle handle, struct acpi_pci_id *id);
+int acpi_pci_bind(struct acpi_device *device);
+int acpi_pci_unbind(struct acpi_device *device);
+int acpi_pci_bind_root(struct acpi_device *device, struct acpi_pci_id *id,
+ struct pci_bus *bus);
+
+/* Arch-defined function to add a bus to the system */
+
+struct pci_bus *pci_acpi_scan_root(struct acpi_device *device, int domain,
+ int bus);
+
+/* --------------------------------------------------------------------------
+ Power Resource
+ -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI_POWER
+int acpi_enable_wakeup_device_power(struct acpi_device *dev);
+int acpi_disable_wakeup_device_power(struct acpi_device *dev);
+int acpi_power_get_inferred_state(struct acpi_device *device);
+int acpi_power_transition(struct acpi_device *device, int state);
+#endif
+
+/* --------------------------------------------------------------------------
+ Embedded Controller
+ -------------------------------------------------------------------------- */
+#ifdef CONFIG_ACPI_EC
+int acpi_ec_ecdt_probe(void);
+#endif
+
+/* --------------------------------------------------------------------------
+ Processor
+ -------------------------------------------------------------------------- */
+
+#define ACPI_PROCESSOR_LIMIT_NONE 0x00
+#define ACPI_PROCESSOR_LIMIT_INCREMENT 0x01
+#define ACPI_PROCESSOR_LIMIT_DECREMENT 0x02
+
+int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
+
+/* --------------------------------------------------------------------------
+ Hot Keys
+ -------------------------------------------------------------------------- */
+
+extern int acpi_specific_hotkey_enabled;
+
+/*--------------------------------------------------------------------------
+ Dock Station
+ -------------------------------------------------------------------------- */
+#if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
+extern int is_dock_device(acpi_handle handle);
+extern int register_dock_notifier(struct notifier_block *nb);
+extern void unregister_dock_notifier(struct notifier_block *nb);
+extern int register_hotplug_dock_device(acpi_handle handle,
+ acpi_notify_handler handler, void *context);
+extern void unregister_hotplug_dock_device(acpi_handle handle);
+#else
+#define is_dock_device(h) (0)
+#define register_dock_notifier(nb) (-ENODEV)
+#define unregister_dock_notifier(nb) do { } while(0)
+#define register_hotplug_dock_device(h1, h2, c) (-ENODEV)
+#define unregister_hotplug_dock_device(h) do { } while(0)
+#endif
+#endif /*__ACPI_DRIVERS_H__*/
--- /dev/null
+#ifndef __ACPI_NUMA_H
+#define __ACPI_NUMA_H
+
+#ifdef CONFIG_ACPI_NUMA
+#include <linux/kernel.h>
+
+/* Proximity bitmap length */
+#if MAX_NUMNODES > 256
+#define MAX_PXM_DOMAINS MAX_NUMNODES
+#else
+#define MAX_PXM_DOMAINS (256) /* The old PXM spec defines an 8-bit field */
+#endif
+
+extern int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS];
+extern int __cpuinitdata node_to_pxm_map[MAX_NUMNODES];
+
+extern int __cpuinit pxm_to_node(int);
+extern int __cpuinit node_to_pxm(int);
+extern int __cpuinit acpi_map_pxm_to_node(int);
+extern void __cpuinit acpi_unmap_pxm_to_node(int);
+
+#endif /* CONFIG_ACPI_NUMA */
+#endif /* __ACPI_NUMA_H */
--- /dev/null
+
+/******************************************************************************
+ *
+ * Name: acpiosxf.h - All interfaces to the OS Services Layer (OSL). These
+ * interfaces must be implemented by the OSL to connect the
+ * ACPI components to the host operating system.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACPIOSXF_H__
+#define __ACPIOSXF_H__
+
+#include "platform/acenv.h"
+#include "actypes.h"
+
+/* Types for acpi_os_execute */
+
+typedef enum {
+ OSL_GLOBAL_LOCK_HANDLER,
+ OSL_NOTIFY_HANDLER,
+ OSL_GPE_HANDLER,
+ OSL_DEBUGGER_THREAD,
+ OSL_EC_POLL_HANDLER,
+ OSL_EC_BURST_HANDLER
+} acpi_execute_type;
+
+#define ACPI_NO_UNIT_LIMIT ((u32) -1)
+#define ACPI_MUTEX_SEM 1
+
+/* Functions for acpi_os_signal */
+
+#define ACPI_SIGNAL_FATAL 0
+#define ACPI_SIGNAL_BREAKPOINT 1
+
+struct acpi_signal_fatal_info {
+ u32 type;
+ u32 code;
+ u32 argument;
+};
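+
+/*
+ * Illustrative sketch (not from the original header): how an OSL might
+ * implement acpi_os_signal(), declared later in this file. The assumption
+ * that 'info' points to a struct acpi_signal_fatal_info when the function
+ * code is ACPI_SIGNAL_FATAL follows from the definitions above; the message
+ * text is hypothetical.
+ */
+#if 0  /* example only */
+acpi_status acpi_os_signal(u32 function, void *info)
+{
+        struct acpi_signal_fatal_info *fatal = info;
+
+        switch (function) {
+        case ACPI_SIGNAL_FATAL:
+                acpi_os_printf("ACPI fatal: type %x code %x arg %x\n",
+                               fatal->type, fatal->code, fatal->argument);
+                break;
+        case ACPI_SIGNAL_BREAKPOINT:
+                /* An AML breakpoint was reached; a debugger could stop here */
+                break;
+        default:
+                break;
+        }
+
+        return AE_OK;
+}
+#endif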
+
+/*
+ * OSL Initialization and shutdown primitives
+ */
+acpi_status acpi_os_initialize(void);
+
+acpi_status acpi_os_terminate(void);
+
+/*
+ * ACPI Table interfaces
+ */
+acpi_status acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *address);
+
+acpi_status
+acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
+ acpi_string * new_val);
+
+acpi_status
+acpi_os_table_override(struct acpi_table_header *existing_table,
+ struct acpi_table_header **new_table);
+
+/*
+ * Spinlock primitives
+ */
+acpi_status acpi_os_create_lock(acpi_spinlock * out_handle);
+
+void acpi_os_delete_lock(acpi_spinlock handle);
+
+acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock handle);
+
+void acpi_os_release_lock(acpi_spinlock handle, acpi_cpu_flags flags);
+
+/*
+ * Semaphore primitives
+ */
+acpi_status
+acpi_os_create_semaphore(u32 max_units,
+ u32 initial_units, acpi_semaphore * out_handle);
+
+acpi_status acpi_os_delete_semaphore(acpi_semaphore handle);
+
+acpi_status
+acpi_os_wait_semaphore(acpi_semaphore handle, u32 units, u16 timeout);
+
+acpi_status acpi_os_signal_semaphore(acpi_semaphore handle, u32 units);
+
+/*
+ * Mutex primitives
+ */
+acpi_status acpi_os_create_mutex(acpi_mutex * out_handle);
+
+void acpi_os_delete_mutex(acpi_mutex handle);
+
+acpi_status acpi_os_acquire_mutex(acpi_mutex handle, u16 timeout);
+
+void acpi_os_release_mutex(acpi_mutex handle);
+
+/* Temporary macros for Mutex* interfaces, map to existing semaphore xfaces */
+
+#define acpi_os_create_mutex(out_handle) acpi_os_create_semaphore (1, 1, out_handle)
+#define acpi_os_delete_mutex(handle) (void) acpi_os_delete_semaphore (handle)
+#define acpi_os_acquire_mutex(handle,time) acpi_os_wait_semaphore (handle, 1, time)
+#define acpi_os_release_mutex(handle) (void) acpi_os_signal_semaphore (handle, 1)
+
+/*
+ * Memory allocation and mapping
+ */
+void *acpi_os_allocate(acpi_size size);
+
+acpi_status
+acpi_os_map_memory(acpi_physical_address physical_address,
+ acpi_size size, void __iomem ** logical_address);
+
+void acpi_os_unmap_memory(void __iomem * logical_address, acpi_size size);
+
+#ifdef ACPI_FUTURE_USAGE
+acpi_status
+acpi_os_get_physical_address(void *logical_address,
+ acpi_physical_address * physical_address);
+#endif
+
+/*
+ * Memory/Object Cache
+ */
+acpi_status
+acpi_os_create_cache(char *cache_name,
+ u16 object_size,
+ u16 max_depth, acpi_cache_t ** return_cache);
+
+acpi_status acpi_os_delete_cache(acpi_cache_t * cache);
+
+acpi_status acpi_os_purge_cache(acpi_cache_t * cache);
+
+void *acpi_os_acquire_object(acpi_cache_t * cache);
+
+acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object);
+
+/*
+ * Interrupt handlers
+ */
+acpi_status
+acpi_os_install_interrupt_handler(u32 gsi,
+ acpi_osd_handler service_routine,
+ void *context);
+
+acpi_status
+acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler service_routine);
+
+/*
+ * Threads and Scheduling
+ */
+acpi_thread_id acpi_os_get_thread_id(void);
+
+acpi_status
+acpi_os_execute(acpi_execute_type type,
+ acpi_osd_exec_callback function, void *context);
+
+void acpi_os_wait_events_complete(void *context);
+
+void acpi_os_sleep(acpi_integer milliseconds);
+
+void acpi_os_stall(u32 microseconds);
+
+/*
+ * Platform and hardware-independent I/O interfaces
+ */
+acpi_status acpi_os_read_port(acpi_io_address address, u32 * value, u32 width);
+
+acpi_status acpi_os_write_port(acpi_io_address address, u32 value, u32 width);
+
+/*
+ * Platform and hardware-independent physical memory interfaces
+ */
+acpi_status
+acpi_os_read_memory(acpi_physical_address address, u32 * value, u32 width);
+
+acpi_status
+acpi_os_write_memory(acpi_physical_address address, u32 value, u32 width);
+
+/*
+ * Platform and hardware-independent PCI configuration space access
+ * Note: Can't use "Register" as a parameter, changed to "Reg" --
+ * certain compilers complain.
+ */
+acpi_status
+acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id,
+ u32 reg, void *value, u32 width);
+
+acpi_status
+acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id,
+ u32 reg, acpi_integer value, u32 width);
+
+/*
+ * Interim function needed for PCI IRQ routing
+ */
+void
+acpi_os_derive_pci_id(acpi_handle rhandle,
+ acpi_handle chandle, struct acpi_pci_id **pci_id);
+
+/*
+ * Miscellaneous
+ */
+acpi_status acpi_os_validate_interface(char *interface);
+
+acpi_status
+acpi_os_validate_address(u8 space_id,
+ acpi_physical_address address, acpi_size length);
+
+u8 acpi_os_readable(void *pointer, acpi_size length);
+
+#ifdef ACPI_FUTURE_USAGE
+u8 acpi_os_writable(void *pointer, acpi_size length);
+#endif
+
+u64 acpi_os_get_timer(void);
+
+acpi_status acpi_os_signal(u32 function, void *info);
+
+/*
+ * Debug print routines
+ */
+void ACPI_INTERNAL_VAR_XFACE acpi_os_printf(const char *format, ...);
+
+void acpi_os_vprintf(const char *format, va_list args);
+
+void acpi_os_redirect_output(void *destination);
+
+#ifdef ACPI_FUTURE_USAGE
+/*
+ * Debug input
+ */
+u32 acpi_os_get_line(char *buffer);
+#endif
+
+/*
+ * Directory manipulation
+ */
+void *acpi_os_open_directory(char *pathname,
+ char *wildcard_spec, char requested_file_type);
+
+/* requested_file_type values */
+
+#define REQUEST_FILE_ONLY 0
+#define REQUEST_DIR_ONLY 1
+
+char *acpi_os_get_next_filename(void *dir_handle);
+
+void acpi_os_close_directory(void *dir_handle);
+
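+/*
+ * Illustrative sketch (not from the original header): iterating a directory
+ * with the three interfaces declared above. The pathname and wildcard are
+ * hypothetical; processing of each filename is left as a comment.
+ */
+#if 0  /* example only */
+static void example_list_directory(void)
+{
+        void *dir;
+        char *filename;
+
+        dir = acpi_os_open_directory("/tmp", "*.dat", REQUEST_FILE_ONLY);
+        if (!dir)
+                return;
+
+        while ((filename = acpi_os_get_next_filename(dir)) != NULL) {
+                /* ... process filename ... */
+        }
+
+        acpi_os_close_directory(dir);
+}
+#endif
+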
+#endif /* __ACPIOSXF_H__ */
--- /dev/null
+
+/******************************************************************************
+ *
+ * Name: acpixf.h - External interfaces to the ACPI subsystem
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACXFACE_H__
+#define __ACXFACE_H__
+
+#include "actypes.h"
+#include "actbl.h"
+
+/*
+ * Global interfaces
+ */
+acpi_status acpi_initialize_subsystem(void);
+
+acpi_status acpi_enable_subsystem(u32 flags);
+
+acpi_status acpi_initialize_objects(u32 flags);
+
+acpi_status acpi_terminate(void);
+
+#ifdef ACPI_FUTURE_USAGE
+acpi_status acpi_subsystem_status(void);
+#endif
+
+acpi_status acpi_enable(void);
+
+acpi_status acpi_disable(void);
+
+#ifdef ACPI_FUTURE_USAGE
+acpi_status acpi_get_system_info(struct acpi_buffer *ret_buffer);
+#endif
+
+const char *acpi_format_exception(acpi_status exception);
+
+acpi_status acpi_purge_cached_objects(void);
+
+#ifdef ACPI_FUTURE_USAGE
+acpi_status
+acpi_install_initialization_handler(acpi_init_handler handler, u32 function);
+#endif
+
+/*
+ * ACPI Memory management
+ */
+void *acpi_allocate(u32 size);
+
+void *acpi_callocate(u32 size);
+
+void acpi_free(void *address);
+
+/*
+ * ACPI table manipulation interfaces
+ */
+acpi_status
+acpi_find_root_pointer(u32 flags, struct acpi_pointer *rsdp_address);
+
+acpi_status acpi_load_tables(void);
+
+#ifdef ACPI_FUTURE_USAGE
+acpi_status acpi_load_table(struct acpi_table_header *table_ptr);
+
+acpi_status acpi_unload_table(acpi_table_type table_type);
+
+acpi_status
+acpi_get_table_header(acpi_table_type table_type,
+ u32 instance, struct acpi_table_header *out_table_header);
+#endif /* ACPI_FUTURE_USAGE */
+
+acpi_status
+acpi_get_table(acpi_table_type table_type,
+ u32 instance, struct acpi_buffer *ret_buffer);
+
+acpi_status
+acpi_get_firmware_table(acpi_string signature,
+ u32 instance,
+ u32 flags, struct acpi_table_header **table_pointer);
+
+/*
+ * Namespace and name interfaces
+ */
+acpi_status
+acpi_walk_namespace(acpi_object_type type,
+ acpi_handle start_object,
+ u32 max_depth,
+ acpi_walk_callback user_function,
+ void *context, void **return_value);
+
+acpi_status
+acpi_get_devices(char *HID,
+ acpi_walk_callback user_function,
+ void *context, void **return_value);
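+
+/*
+ * Illustrative sketch (not from the original header): visiting every device
+ * with a given hardware ID via acpi_get_devices() above. The callback
+ * signature assumes the acpi_walk_callback definition from actypes.h (not
+ * shown here); the HID string and function names are hypothetical.
+ */
+#if 0  /* example only */
+static acpi_status example_device_callback(acpi_handle handle, u32 level,
+                                           void *context, void **return_value)
+{
+        /* ... inspect the handle, optionally record it via *return_value ... */
+        return AE_OK;
+}
+
+static acpi_status example_find_devices(void)
+{
+        return acpi_get_devices("XYZ0001", example_device_callback, NULL, NULL);
+}
+#endif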
+
+acpi_status
+acpi_get_name(acpi_handle handle,
+ u32 name_type, struct acpi_buffer *ret_path_ptr);
+
+acpi_status
+acpi_get_handle(acpi_handle parent,
+ acpi_string pathname, acpi_handle * ret_handle);
+
+acpi_status
+acpi_attach_data(acpi_handle obj_handle,
+ acpi_object_handler handler, void *data);
+
+acpi_status
+acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler);
+
+acpi_status
+acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data);
+
+acpi_status
+acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags);
+
+/*
+ * Object manipulation and enumeration
+ */
+acpi_status
+acpi_evaluate_object(acpi_handle object,
+ acpi_string pathname,
+ struct acpi_object_list *parameter_objects,
+ struct acpi_buffer *return_object_buffer);
+
+#ifdef ACPI_FUTURE_USAGE
+acpi_status
+acpi_evaluate_object_typed(acpi_handle object,
+ acpi_string pathname,
+ struct acpi_object_list *external_params,
+ struct acpi_buffer *return_buffer,
+ acpi_object_type return_type);
+#endif
+
+acpi_status
+acpi_get_object_info(acpi_handle handle, struct acpi_buffer *return_buffer);
+
+acpi_status
+acpi_get_next_object(acpi_object_type type,
+ acpi_handle parent,
+ acpi_handle child, acpi_handle * out_handle);
+
+acpi_status acpi_get_type(acpi_handle object, acpi_object_type * out_type);
+
+acpi_status acpi_get_parent(acpi_handle object, acpi_handle * out_handle);
+
+/*
+ * Event handler interfaces
+ */
+acpi_status
+acpi_install_fixed_event_handler(u32 acpi_event,
+ acpi_event_handler handler, void *context);
+
+acpi_status
+acpi_remove_fixed_event_handler(u32 acpi_event, acpi_event_handler handler);
+
+acpi_status
+acpi_install_notify_handler(acpi_handle device,
+ u32 handler_type,
+ acpi_notify_handler handler, void *context);
+
+acpi_status
+acpi_remove_notify_handler(acpi_handle device,
+ u32 handler_type, acpi_notify_handler handler);
+
+acpi_status
+acpi_install_address_space_handler(acpi_handle device,
+ acpi_adr_space_type space_id,
+ acpi_adr_space_handler handler,
+ acpi_adr_space_setup setup, void *context);
+
+acpi_status
+acpi_remove_address_space_handler(acpi_handle device,
+ acpi_adr_space_type space_id,
+ acpi_adr_space_handler handler);
+
+acpi_status
+acpi_install_gpe_handler(acpi_handle gpe_device,
+ u32 gpe_number,
+ u32 type, acpi_event_handler address, void *context);
+
+#ifdef ACPI_FUTURE_USAGE
+acpi_status acpi_install_exception_handler(acpi_exception_handler handler);
+#endif
+
+/*
+ * Event interfaces
+ */
+acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle);
+
+acpi_status acpi_release_global_lock(u32 handle);
+
+acpi_status
+acpi_remove_gpe_handler(acpi_handle gpe_device,
+ u32 gpe_number, acpi_event_handler address);
+
+acpi_status acpi_enable_event(u32 event, u32 flags);
+
+acpi_status acpi_disable_event(u32 event, u32 flags);
+
+acpi_status acpi_clear_event(u32 event);
+
+#ifdef ACPI_FUTURE_USAGE
+acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status);
+#endif /* ACPI_FUTURE_USAGE */
+
+acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type);
+
+acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags);
+
+acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags);
+
+acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags);
+
+#ifdef ACPI_FUTURE_USAGE
+acpi_status
+acpi_get_gpe_status(acpi_handle gpe_device,
+ u32 gpe_number,
+ u32 flags, acpi_event_status * event_status);
+#endif /* ACPI_FUTURE_USAGE */
+
+acpi_status
+acpi_install_gpe_block(acpi_handle gpe_device,
+ struct acpi_generic_address *gpe_block_address,
+ u32 register_count, u32 interrupt_number);
+
+acpi_status acpi_remove_gpe_block(acpi_handle gpe_device);
+
+/*
+ * Resource interfaces
+ */
+typedef
+acpi_status(*acpi_walk_resource_callback) (struct acpi_resource * resource,
+ void *context);
+
+acpi_status
+acpi_get_vendor_resource(acpi_handle device_handle,
+ char *name,
+ struct acpi_vendor_uuid *uuid,
+ struct acpi_buffer *ret_buffer);
+
+acpi_status
+acpi_get_current_resources(acpi_handle device_handle,
+ struct acpi_buffer *ret_buffer);
+
+#ifdef ACPI_FUTURE_USAGE
+acpi_status
+acpi_get_possible_resources(acpi_handle device_handle,
+ struct acpi_buffer *ret_buffer);
+#endif
+
+acpi_status
+acpi_walk_resources(acpi_handle device_handle,
+ char *name,
+ acpi_walk_resource_callback user_function, void *context);
+
+acpi_status
+acpi_set_current_resources(acpi_handle device_handle,
+ struct acpi_buffer *in_buffer);
+
+acpi_status
+acpi_get_irq_routing_table(acpi_handle bus_device_handle,
+ struct acpi_buffer *ret_buffer);
+
+acpi_status
+acpi_resource_to_address64(struct acpi_resource *resource,
+ struct acpi_resource_address64 *out);
+
+/*
+ * Hardware (ACPI device) interfaces
+ */
+acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags);
+
+acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags);
+
+acpi_status
+acpi_set_firmware_waking_vector(acpi_physical_address physical_address);
+
+#ifdef ACPI_FUTURE_USAGE
+acpi_status
+acpi_get_firmware_waking_vector(acpi_physical_address * physical_address);
+#endif
+
+acpi_status
+acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b);
+
+acpi_status acpi_enter_sleep_state_prep(u8 sleep_state);
+
+acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state);
+
+acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void);
+
+acpi_status acpi_leave_sleep_state(u8 sleep_state);
+
+#endif /* __ACXFACE_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acresrc.h - Resource Manager function prototypes
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACRESRC_H__
+#define __ACRESRC_H__
+
+/* Need the AML resource descriptor structs */
+
+#include "amlresrc.h"
+
+/*
+ * If possible, pack the following structures to byte alignment, since we
+ * don't care about performance for debug output. Two cases where we cannot
+ * pack the structures:
+ *
+ * 1) Hardware does not support misaligned memory transfers
+ * 2) Compiler does not support pointers within packed structures
+ */
+#if (!defined(ACPI_MISALIGNMENT_NOT_SUPPORTED) && !defined(ACPI_PACKED_POINTERS_NOT_SUPPORTED))
+#pragma pack(1)
+#endif
+
+/*
+ * Individual entry for the resource conversion tables
+ */
+typedef const struct acpi_rsconvert_info {
+ u8 opcode;
+ u8 resource_offset;
+ u8 aml_offset;
+ u8 value;
+
+} acpi_rsconvert_info;
+
+/* Resource conversion opcodes */
+
+#define ACPI_RSC_INITGET 0
+#define ACPI_RSC_INITSET 1
+#define ACPI_RSC_FLAGINIT 2
+#define ACPI_RSC_1BITFLAG 3
+#define ACPI_RSC_2BITFLAG 4
+#define ACPI_RSC_COUNT 5
+#define ACPI_RSC_COUNT16 6
+#define ACPI_RSC_LENGTH 7
+#define ACPI_RSC_MOVE8 8
+#define ACPI_RSC_MOVE16 9
+#define ACPI_RSC_MOVE32 10
+#define ACPI_RSC_MOVE64 11
+#define ACPI_RSC_SET8 12
+#define ACPI_RSC_DATA8 13
+#define ACPI_RSC_ADDRESS 14
+#define ACPI_RSC_SOURCE 15
+#define ACPI_RSC_SOURCEX 16
+#define ACPI_RSC_BITMASK 17
+#define ACPI_RSC_BITMASK16 18
+#define ACPI_RSC_EXIT_NE 19
+#define ACPI_RSC_EXIT_LE 20
+
+/* Resource Conversion sub-opcodes */
+
+#define ACPI_RSC_COMPARE_AML_LENGTH 0
+#define ACPI_RSC_COMPARE_VALUE 1
+
+#define ACPI_RSC_TABLE_SIZE(d) (sizeof (d) / sizeof (struct acpi_rsconvert_info))
+
+#define ACPI_RS_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_resource,f)
+#define AML_OFFSET(f) (u8) ACPI_OFFSET (union aml_resource,f)
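+
+/*
+ * Illustrative sketch (not from the original header): the general shape of a
+ * conversion-table entry built from the opcodes and offset macros above.
+ * The field names used with ACPI_RS_OFFSET and AML_OFFSET are assumptions
+ * about the acpi_resource and aml_resource layouts defined elsewhere, so the
+ * entry is purely hypothetical.
+ */
+#if 0  /* example only */
+static struct acpi_rsconvert_info example_convert_entry = {
+        ACPI_RSC_1BITFLAG,                      /* move a one-bit flag */
+        ACPI_RS_OFFSET(data.irq.triggering),    /* destination in acpi_resource */
+        AML_OFFSET(irq.flags),                  /* source byte in aml_resource */
+        0                                       /* bit position within that byte */
+};
+#endif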
+
+typedef const struct acpi_rsdump_info {
+ u8 opcode;
+ u8 offset;
+ char *name;
+ const char **pointer;
+
+} acpi_rsdump_info;
+
+/* Values for the Opcode field above */
+
+#define ACPI_RSD_TITLE 0
+#define ACPI_RSD_LITERAL 1
+#define ACPI_RSD_STRING 2
+#define ACPI_RSD_UINT8 3
+#define ACPI_RSD_UINT16 4
+#define ACPI_RSD_UINT32 5
+#define ACPI_RSD_UINT64 6
+#define ACPI_RSD_1BITFLAG 7
+#define ACPI_RSD_2BITFLAG 8
+#define ACPI_RSD_SHORTLIST 9
+#define ACPI_RSD_LONGLIST 10
+#define ACPI_RSD_DWORDLIST 11
+#define ACPI_RSD_ADDRESS 12
+#define ACPI_RSD_SOURCE 13
+
+/* restore default alignment */
+
+#pragma pack()
+
+/* Resource tables indexed by internal resource type */
+
+extern const u8 acpi_gbl_aml_resource_sizes[];
+extern struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[];
+
+/* Resource tables indexed by raw AML resource descriptor type */
+
+extern const u8 acpi_gbl_resource_struct_sizes[];
+extern struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[];
+
+struct acpi_vendor_walk_info {
+ struct acpi_vendor_uuid *uuid;
+ struct acpi_buffer *buffer;
+ acpi_status status;
+};
+
+/*
+ * rscreate
+ */
+acpi_status
+acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
+ struct acpi_buffer *output_buffer);
+
+acpi_status
+acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
+ struct acpi_buffer *output_buffer);
+
+acpi_status
+acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
+ struct acpi_buffer *output_buffer);
+
+/*
+ * rsutils
+ */
+
+acpi_status
+acpi_rs_get_prt_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer);
+
+acpi_status
+acpi_rs_get_crs_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer);
+
+acpi_status
+acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer);
+
+acpi_status
+acpi_rs_get_method_data(acpi_handle handle,
+ char *path, struct acpi_buffer *ret_buffer);
+
+acpi_status
+acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer);
+
+/*
+ * rscalc
+ */
+acpi_status
+acpi_rs_get_list_length(u8 * aml_buffer,
+ u32 aml_buffer_length, acpi_size * size_needed);
+
+acpi_status
+acpi_rs_get_aml_length(struct acpi_resource *linked_list_buffer,
+ acpi_size * size_needed);
+
+acpi_status
+acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
+ acpi_size * buffer_size_needed);
+
+acpi_status
+acpi_rs_convert_aml_to_resources(u8 * aml,
+ u32 length,
+ u32 offset, u8 resource_index, void **context);
+
+acpi_status
+acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
+ acpi_size aml_size_needed, u8 * output_buffer);
+
+/*
+ * rsaddr
+ */
+void
+acpi_rs_set_address_common(union aml_resource *aml,
+ struct acpi_resource *resource);
+
+u8
+acpi_rs_get_address_common(struct acpi_resource *resource,
+ union aml_resource *aml);
+
+/*
+ * rsmisc
+ */
+acpi_status
+acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
+ union aml_resource *aml,
+ struct acpi_rsconvert_info *info);
+
+acpi_status
+acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
+ union aml_resource *aml,
+ struct acpi_rsconvert_info *info);
+
+/*
+ * rsutils
+ */
+void
+acpi_rs_move_data(void *destination,
+ void *source, u16 item_count, u8 move_type);
+
+u8 acpi_rs_decode_bitmask(u16 mask, u8 * list);
+
+u16 acpi_rs_encode_bitmask(u8 * list, u8 count);
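+
+/*
+ * Illustrative sketch (not from the original header): judging from their
+ * names and types, the two helpers above convert between a list of bit
+ * positions (e.g. IRQ numbers) and a packed 16-bit mask -- an assumption,
+ * since their definitions are elsewhere. Encoding the list {3, 4, 5} would
+ * then be expected to yield (1 << 3) | (1 << 4) | (1 << 5) == 0x0038.
+ */
+#if 0  /* example only */
+static u16 example_encode_irqs(void)
+{
+        u8 irqs[3] = { 3, 4, 5 };
+
+        return acpi_rs_encode_bitmask(irqs, 3);        /* expected: 0x0038 */
+}
+#endif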
+
+acpi_rs_length
+acpi_rs_get_resource_source(acpi_rs_length resource_length,
+ acpi_rs_length minimum_length,
+ struct acpi_resource_source *resource_source,
+ union aml_resource *aml, char *string_ptr);
+
+acpi_rsdesc_size
+acpi_rs_set_resource_source(union aml_resource *aml,
+ acpi_rs_length minimum_length,
+ struct acpi_resource_source *resource_source);
+
+void
+acpi_rs_set_resource_header(u8 descriptor_type,
+ acpi_rsdesc_size total_length,
+ union aml_resource *aml);
+
+void
+acpi_rs_set_resource_length(acpi_rsdesc_size total_length,
+ union aml_resource *aml);
+
+/*
+ * rsdump
+ */
+void acpi_rs_dump_resource_list(struct acpi_resource *resource);
+
+void acpi_rs_dump_irq_list(u8 * route_table);
+
+/*
+ * Resource conversion tables
+ */
+extern struct acpi_rsconvert_info acpi_rs_convert_dma[];
+extern struct acpi_rsconvert_info acpi_rs_convert_end_dpf[];
+extern struct acpi_rsconvert_info acpi_rs_convert_io[];
+extern struct acpi_rsconvert_info acpi_rs_convert_fixed_io[];
+extern struct acpi_rsconvert_info acpi_rs_convert_end_tag[];
+extern struct acpi_rsconvert_info acpi_rs_convert_memory24[];
+extern struct acpi_rsconvert_info acpi_rs_convert_generic_reg[];
+extern struct acpi_rsconvert_info acpi_rs_convert_memory32[];
+extern struct acpi_rsconvert_info acpi_rs_convert_fixed_memory32[];
+extern struct acpi_rsconvert_info acpi_rs_convert_address32[];
+extern struct acpi_rsconvert_info acpi_rs_convert_address16[];
+extern struct acpi_rsconvert_info acpi_rs_convert_ext_irq[];
+extern struct acpi_rsconvert_info acpi_rs_convert_address64[];
+extern struct acpi_rsconvert_info acpi_rs_convert_ext_address64[];
+
+/* These resources require separate get/set tables */
+
+extern struct acpi_rsconvert_info acpi_rs_get_irq[];
+extern struct acpi_rsconvert_info acpi_rs_get_start_dpf[];
+extern struct acpi_rsconvert_info acpi_rs_get_vendor_small[];
+extern struct acpi_rsconvert_info acpi_rs_get_vendor_large[];
+
+extern struct acpi_rsconvert_info acpi_rs_set_irq[];
+extern struct acpi_rsconvert_info acpi_rs_set_start_dpf[];
+extern struct acpi_rsconvert_info acpi_rs_set_vendor[];
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+/*
+ * rsinfo
+ */
+extern struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[];
+
+/*
+ * rsdump
+ */
+extern struct acpi_rsdump_info acpi_rs_dump_irq[];
+extern struct acpi_rsdump_info acpi_rs_dump_dma[];
+extern struct acpi_rsdump_info acpi_rs_dump_start_dpf[];
+extern struct acpi_rsdump_info acpi_rs_dump_end_dpf[];
+extern struct acpi_rsdump_info acpi_rs_dump_io[];
+extern struct acpi_rsdump_info acpi_rs_dump_fixed_io[];
+extern struct acpi_rsdump_info acpi_rs_dump_vendor[];
+extern struct acpi_rsdump_info acpi_rs_dump_end_tag[];
+extern struct acpi_rsdump_info acpi_rs_dump_memory24[];
+extern struct acpi_rsdump_info acpi_rs_dump_memory32[];
+extern struct acpi_rsdump_info acpi_rs_dump_fixed_memory32[];
+extern struct acpi_rsdump_info acpi_rs_dump_address16[];
+extern struct acpi_rsdump_info acpi_rs_dump_address32[];
+extern struct acpi_rsdump_info acpi_rs_dump_address64[];
+extern struct acpi_rsdump_info acpi_rs_dump_ext_address64[];
+extern struct acpi_rsdump_info acpi_rs_dump_ext_irq[];
+extern struct acpi_rsdump_info acpi_rs_dump_generic_reg[];
+#endif
+
+#endif /* __ACRESRC_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acstruct.h - Internal structs
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACSTRUCT_H__
+#define __ACSTRUCT_H__
+
+/* acpisrc:struct_defs -- for acpisrc conversion */
+
+/*****************************************************************************
+ *
+ * Tree walking typedefs and structs
+ *
+ ****************************************************************************/
+
+/*
+ * Walk state - current state of a parse tree walk. Used for both a leisurely
+ * stroll through the tree (for whatever reason), and for control method
+ * execution.
+ */
+#define ACPI_NEXT_OP_DOWNWARD 1
+#define ACPI_NEXT_OP_UPWARD 2
+
+/*
+ * Groups of definitions for walk_type used for different implementations of
+ * walkers (never simultaneously) - flags for interpreter:
+ */
+#define ACPI_WALK_NON_METHOD 0
+#define ACPI_WALK_METHOD 0x01
+#define ACPI_WALK_METHOD_RESTART 0x02
+
+/* Flags for i_aSL compiler only */
+
+#define ACPI_WALK_CONST_REQUIRED 0x10
+#define ACPI_WALK_CONST_OPTIONAL 0x20
+
+struct acpi_walk_state {
+ struct acpi_walk_state *next; /* Next walk_state in list */
+ u8 descriptor_type; /* To differentiate various internal objs */
+ u8 walk_type;
+ u16 opcode; /* Current AML opcode */
+ u8 next_op_info; /* Info about next_op */
+ u8 num_operands; /* Stack pointer for Operands[] array */
+ acpi_owner_id owner_id; /* Owner of objects created during the walk */
+ u8 last_predicate; /* Result of last predicate */
+ u8 current_result;
+ u8 return_used;
+ u8 scope_depth;
+ u8 pass_number; /* Parse pass during table load */
+ u32 aml_offset;
+ u32 arg_types;
+ u32 method_breakpoint; /* For single stepping */
+ u32 user_breakpoint; /* User AML breakpoint */
+ u32 parse_flags;
+
+ struct acpi_parse_state parser_state; /* Current state of parser */
+ u32 prev_arg_types;
+ u32 arg_count; /* push for fixed or var args */
+
+ struct acpi_namespace_node arguments[ACPI_METHOD_NUM_ARGS]; /* Control method arguments */
+ struct acpi_namespace_node local_variables[ACPI_METHOD_NUM_LOCALS]; /* Control method locals */
+ union acpi_operand_object *operands[ACPI_OBJ_NUM_OPERANDS + 1]; /* Operands passed to the interpreter (+1 for NULL terminator) */
+ union acpi_operand_object **params;
+
+ u8 *aml_last_while;
+ union acpi_operand_object **caller_return_desc;
+ union acpi_generic_state *control_state; /* List of control states (nested IFs) */
+ struct acpi_namespace_node *deferred_node; /* Used when executing deferred opcodes */
+ struct acpi_gpe_event_info *gpe_event_info; /* Info for GPE (_Lxx/_Exx methods only) */
+ union acpi_operand_object *implicit_return_obj;
+ struct acpi_namespace_node *method_call_node; /* Called method Node */
+ union acpi_parse_object *method_call_op; /* method_call Op if running a method */
+ union acpi_operand_object *method_desc; /* Method descriptor if running a method */
+ struct acpi_namespace_node *method_node; /* Method node if running a method. */
+ union acpi_parse_object *op; /* Current parser op */
+ const struct acpi_opcode_info *op_info; /* Info on current opcode */
+ union acpi_parse_object *origin; /* Start of walk [Obsolete] */
+ union acpi_operand_object *result_obj;
+ union acpi_generic_state *results; /* Stack of accumulated results */
+ union acpi_operand_object *return_desc; /* Return object, if any */
+ union acpi_generic_state *scope_info; /* Stack of nested scopes */
+ union acpi_parse_object *prev_op; /* Last op that was processed */
+ union acpi_parse_object *next_op; /* next op to be processed */
+ struct acpi_thread_state *thread;
+ acpi_parse_downwards descending_callback;
+ acpi_parse_upwards ascending_callback;
+};
+
+/* Info used by acpi_ps_init_objects */
+
+struct acpi_init_walk_info {
+ u16 method_count;
+ u16 device_count;
+ u16 op_region_count;
+ u16 field_count;
+ u16 buffer_count;
+ u16 package_count;
+ u16 op_region_init;
+ u16 field_init;
+ u16 buffer_init;
+ u16 package_init;
+ u16 object_count;
+ struct acpi_table_desc *table_desc;
+};
+
+struct acpi_get_devices_info {
+ acpi_walk_callback user_function;
+ void *context;
+ char *hid;
+};
+
+union acpi_aml_operands {
+ union acpi_operand_object *operands[7];
+
+ struct {
+ struct acpi_object_integer *type;
+ struct acpi_object_integer *code;
+ struct acpi_object_integer *argument;
+
+ } fatal;
+
+ struct {
+ union acpi_operand_object *source;
+ struct acpi_object_integer *index;
+ union acpi_operand_object *target;
+
+ } index;
+
+ struct {
+ union acpi_operand_object *source;
+ struct acpi_object_integer *index;
+ struct acpi_object_integer *length;
+ union acpi_operand_object *target;
+
+ } mid;
+};
+
+/*
+ * Structure used to pass object evaluation parameters.
+ * Purpose is to reduce CPU stack use.
+ */
+struct acpi_evaluate_info {
+ struct acpi_namespace_node *prefix_node;
+ char *pathname;
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object **parameters;
+ struct acpi_namespace_node *resolved_node;
+ union acpi_operand_object *return_object;
+ u8 pass_number;
+ u8 parameter_type;
+ u8 return_object_type;
+ u8 flags;
+};
+
+/* Types for parameter_type above */
+
+#define ACPI_PARAM_ARGS 0
+#define ACPI_PARAM_GPE 1
+
+/* Values for Flags above */
+
+#define ACPI_IGNORE_RETURN_VALUE 1
+
+/* Info used by acpi_ns_initialize_devices */
+
+struct acpi_device_walk_info {
+ u16 device_count;
+ u16 num_STA;
+ u16 num_INI;
+ struct acpi_table_desc *table_desc;
+ struct acpi_evaluate_info *evaluate_info;
+};
+
+/* TBD: [Restructure] Merge with struct above */
+
+struct acpi_walk_info {
+ u32 debug_level;
+ u32 count;
+ acpi_owner_id owner_id;
+ u8 display_type;
+};
+
+/* Display Types */
+
+#define ACPI_DISPLAY_SUMMARY (u8) 0
+#define ACPI_DISPLAY_OBJECTS (u8) 1
+#define ACPI_DISPLAY_MASK (u8) 1
+
+#define ACPI_DISPLAY_SHORT (u8) 2
+
+#endif
--- /dev/null
+/******************************************************************************
+ *
+ * Name: actables.h - ACPI table management
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACTABLES_H__
+#define __ACTABLES_H__
+
+/* Used in acpi_tb_map_acpi_table for size parameter if table header is to be used */
+
+#define SIZE_IN_HEADER 0
+
+/*
+ * tbconvrt - Table conversion routines
+ */
+acpi_status acpi_tb_convert_to_xsdt(struct acpi_table_desc *table_info);
+
+acpi_status acpi_tb_convert_table_fadt(void);
+
+acpi_status acpi_tb_build_common_facs(struct acpi_table_desc *table_info);
+
+u32
+acpi_tb_get_table_count(struct rsdp_descriptor *RSDP,
+ struct acpi_table_header *RSDT);
+
+/*
+ * tbget - Table "get" routines
+ */
+acpi_status
+acpi_tb_get_table(struct acpi_pointer *address,
+ struct acpi_table_desc *table_info);
+
+acpi_status
+acpi_tb_get_table_header(struct acpi_pointer *address,
+ struct acpi_table_header *return_header);
+
+acpi_status
+acpi_tb_get_table_body(struct acpi_pointer *address,
+ struct acpi_table_header *header,
+ struct acpi_table_desc *table_info);
+
+acpi_status
+acpi_tb_get_table_ptr(acpi_table_type table_type,
+ u32 instance, struct acpi_table_header **table_ptr_loc);
+
+acpi_status acpi_tb_verify_rsdp(struct acpi_pointer *address);
+
+void acpi_tb_get_rsdt_address(struct acpi_pointer *out_address);
+
+acpi_status acpi_tb_validate_rsdt(struct acpi_table_header *table_ptr);
+
+/*
+ * tbgetall - get multiple required tables
+ */
+acpi_status acpi_tb_get_required_tables(void);
+
+/*
+ * tbinstall - Table installation
+ */
+acpi_status acpi_tb_install_table(struct acpi_table_desc *table_info);
+
+acpi_status
+acpi_tb_recognize_table(struct acpi_table_desc *table_info, u8 search_type);
+
+acpi_status
+acpi_tb_init_table_descriptor(acpi_table_type table_type,
+ struct acpi_table_desc *table_info);
+
+/*
+ * tbremove - Table removal and deletion
+ */
+void acpi_tb_delete_all_tables(void);
+
+void acpi_tb_delete_tables_by_type(acpi_table_type type);
+
+void acpi_tb_delete_single_table(struct acpi_table_desc *table_desc);
+
+struct acpi_table_desc *acpi_tb_uninstall_table(struct acpi_table_desc
+ *table_desc);
+
+/*
+ * tbxfroot - RSDP, RSDT utilities
+ */
+acpi_status
+acpi_tb_find_table(char *signature,
+ char *oem_id,
+ char *oem_table_id, struct acpi_table_header **table_ptr);
+
+acpi_status acpi_tb_get_table_rsdt(void);
+
+acpi_status acpi_tb_validate_rsdp(struct rsdp_descriptor *rsdp);
+
+/*
+ * tbutils - common table utilities
+ */
+acpi_status acpi_tb_is_table_installed(struct acpi_table_desc *new_table_desc);
+
+acpi_status
+acpi_tb_verify_table_checksum(struct acpi_table_header *table_header);
+
+u8 acpi_tb_sum_table(void *buffer, u32 length);
+
+u8 acpi_tb_generate_checksum(struct acpi_table_header *table);
+
+void acpi_tb_set_checksum(struct acpi_table_header *table);
+
+acpi_status
+acpi_tb_validate_table_header(struct acpi_table_header *table_header);
+
+#endif /* __ACTABLES_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: actbl.h - Basic ACPI Table Definitions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACTBL_H__
+#define __ACTBL_H__
+
+/*
+ * Values for description table header signatures. Useful because they make
+ * it more difficult to inadvertently type in the wrong signature.
+ */
+#define DSDT_SIG "DSDT" /* Differentiated System Description Table */
+#define FADT_SIG "FACP" /* Fixed ACPI Description Table */
+#define FACS_SIG "FACS" /* Firmware ACPI Control Structure */
+#define PSDT_SIG "PSDT" /* Persistent System Description Table */
+#define RSDP_SIG "RSD PTR " /* Root System Description Pointer */
+#define RSDT_SIG "RSDT" /* Root System Description Table */
+#define XSDT_SIG "XSDT" /* Extended System Description Table */
+#define SSDT_SIG "SSDT" /* Secondary System Description Table */
+#define RSDP_NAME "RSDP"
+
+/*
+ * All tables and structures must be byte-packed to match the ACPI
+ * specification, since the tables are provided by the system BIOS
+ */
+#pragma pack(1)
+
+/*
+ * These are the ACPI tables that are directly consumed by the subsystem.
+ *
+ * The RSDP and FACS do not use the common ACPI table header. All other ACPI
+ * tables use the header.
+ *
+ * Note about bitfields: The u8 type is used for bitfields in ACPI tables.
+ * This is the only type that is even remotely portable. Anything else is not
+ * portable, so do not use any other bitfield types.
+ */
+
+/*******************************************************************************
+ *
+ * ACPI Table Header. This common header is used by all tables except the
+ * RSDP and FACS. The define is used for direct inclusion of header into
+ * other ACPI tables
+ *
+ ******************************************************************************/
+
+#define ACPI_TABLE_HEADER_DEF \
+ char signature[4]; /* ASCII table signature */\
+ u32 length; /* Length of table in bytes, including this header */\
+ u8 revision; /* ACPI Specification minor version # */\
+ u8 checksum; /* To make sum of entire table == 0 */\
+ char oem_id[6]; /* ASCII OEM identification */\
+ char oem_table_id[8]; /* ASCII OEM table identification */\
+ u32 oem_revision; /* OEM revision number */\
+ char asl_compiler_id[4]; /* ASCII ASL compiler vendor ID */\
+ u32 asl_compiler_revision; /* ASL compiler version */
+
+struct acpi_table_header {
+ACPI_TABLE_HEADER_DEF};
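+
+/*
+ * Illustrative sketch, not part of the original header: per the ACPI
+ * specification, the 'checksum' field above is chosen so that the byte
+ * sum of the entire table (header included) is zero modulo 256. The
+ * helper name below is hypothetical; the real logic is assumed to live
+ * behind acpi_tb_generate_checksum()/acpi_tb_verify_table_checksum().
+ */
+static inline u8 acpi_example_sum_table_bytes(const void *table, u32 length)
+{
+ const u8 *bytes = (const u8 *) table;
+ u8 sum = 0;
+ u32 i;
+
+ for (i = 0; i < length; i++)
+ sum = (u8) (sum + bytes[i]);
+
+ return sum; /* A valid table yields 0 */
+}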
+
+/*
+ * GAS - Generic Address Structure (ACPI 2.0+)
+ */
+struct acpi_generic_address {
+ u8 address_space_id; /* Address space where struct or register exists */
+ u8 register_bit_width; /* Size in bits of given register */
+ u8 register_bit_offset; /* Bit offset within the register */
+ u8 access_width; /* Minimum Access size (ACPI 3.0) */
+ u64 address; /* 64-bit address of struct or register */
+};
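+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the original
+ * header): the address_space_id values come from the ACPI specification,
+ * where 0 selects system memory space and 1 selects system I/O space;
+ * register_bit_width/register_bit_offset then locate the register within
+ * the region starting at 'address'.
+ */
+static inline int acpi_example_gas_is_system_io(const struct acpi_generic_address *gas)
+{
+ return gas->address_space_id == 1; /* 1 = system I/O space per the ACPI spec */
+}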
+
+/*******************************************************************************
+ *
+ * RSDP - Root System Description Pointer (Signature is "RSD PTR ")
+ *
+ ******************************************************************************/
+
+struct rsdp_descriptor {
+ char signature[8]; /* ACPI signature, contains "RSD PTR " */
+ u8 checksum; /* ACPI 1.0 checksum */
+ char oem_id[6]; /* OEM identification */
+ u8 revision; /* Must be (0) for ACPI 1.0 or (2) for ACPI 2.0+ */
+ u32 rsdt_physical_address; /* 32-bit physical address of the RSDT */
+ u32 length; /* Table length in bytes, including header (ACPI 2.0+) */
+ u64 xsdt_physical_address; /* 64-bit physical address of the XSDT (ACPI 2.0+) */
+ u8 extended_checksum; /* Checksum of entire table (ACPI 2.0+) */
+ u8 reserved[3]; /* Reserved, must be zero */
+};
+
+#define ACPI_RSDP_REV0_SIZE 20 /* Size of original ACPI 1.0 RSDP */
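+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the original
+ * header): an ACPI 1.0 RSDP is validated by summing its first 20 bytes
+ * (ACPI_RSDP_REV0_SIZE); a revision 2+ RSDP must additionally sum to
+ * zero over all 'length' bytes, which covers extended_checksum.
+ */
+static inline int acpi_example_rsdp_checksums_ok(const struct rsdp_descriptor *rsdp)
+{
+ const u8 *bytes = (const u8 *) rsdp;
+ u8 sum = 0;
+ u32 i;
+
+ for (i = 0; i < ACPI_RSDP_REV0_SIZE; i++)
+ sum = (u8) (sum + bytes[i]);
+ if (sum != 0)
+ return 0;
+
+ if (rsdp->revision < 2)
+ return 1;
+
+ sum = 0;
+ for (i = 0; i < rsdp->length; i++)
+ sum = (u8) (sum + bytes[i]);
+ return sum == 0;
+}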
+
+/*******************************************************************************
+ *
+ * RSDT/XSDT - Root System Description Tables
+ *
+ ******************************************************************************/
+
+struct rsdt_descriptor {
+ ACPI_TABLE_HEADER_DEF u32 table_offset_entry[1]; /* Array of pointers to ACPI tables */
+};
+
+struct xsdt_descriptor {
+ ACPI_TABLE_HEADER_DEF u64 table_offset_entry[1]; /* Array of pointers to ACPI tables */
+};
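+
+/*
+ * Illustrative sketch (hypothetical helpers, not part of the original
+ * header): the number of table pointers in an RSDT or XSDT follows from
+ * the header length field -- each RSDT entry is a 32-bit physical
+ * address, each XSDT entry a 64-bit one. This is the kind of calculation
+ * acpi_tb_get_table_count() is assumed to perform internally.
+ */
+static inline u32 acpi_example_rsdt_entry_count(const struct rsdt_descriptor *rsdt)
+{
+ return (rsdt->length - sizeof(struct acpi_table_header)) / sizeof(u32);
+}
+
+static inline u32 acpi_example_xsdt_entry_count(const struct xsdt_descriptor *xsdt)
+{
+ return (xsdt->length - sizeof(struct acpi_table_header)) / sizeof(u64);
+}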
+
+/*******************************************************************************
+ *
+ * FACS - Firmware ACPI Control Structure (FACS)
+ *
+ ******************************************************************************/
+
+struct facs_descriptor {
+ char signature[4]; /* ASCII table signature */
+ u32 length; /* Length of structure, in bytes */
+ u32 hardware_signature; /* Hardware configuration signature */
+ u32 firmware_waking_vector; /* 32-bit physical address of the Firmware Waking Vector */
+ u32 global_lock; /* Global Lock for shared hardware resources */
+
+ /* Flags (32 bits) */
+
+ u8 S4bios_f:1; /* 00: S4BIOS support is present */
+ u8:7; /* 01-07: Reserved, must be zero */
+ u8 reserved1[3]; /* 08-31: Reserved, must be zero */
+
+ u64 xfirmware_waking_vector; /* 64-bit version of the Firmware Waking Vector (ACPI 2.0+) */
+ u8 version; /* Version of this table (ACPI 2.0+) */
+ u8 reserved[31]; /* Reserved, must be zero */
+};
+
+#define ACPI_GLOCK_PENDING 0x01 /* 00: Pending global lock ownership */
+#define ACPI_GLOCK_OWNED 0x02 /* 01: Global lock is owned */
+
+/*
+ * Common FACS - This is a version-independent FACS structure, for internal use only
+ */
+struct acpi_common_facs {
+ u32 *global_lock;
+ u64 *firmware_waking_vector;
+ u8 vector_width;
+};
+
+/*******************************************************************************
+ *
+ * FADT - Fixed ACPI Description Table (Signature "FACP")
+ *
+ ******************************************************************************/
+
+/* Fields common to all versions of the FADT */
+
+#define ACPI_FADT_COMMON \
+ ACPI_TABLE_HEADER_DEF \
+ u32 V1_firmware_ctrl; /* 32-bit physical address of FACS */ \
+ u32 V1_dsdt; /* 32-bit physical address of DSDT */ \
+ u8 reserved1; /* System Interrupt Model isn't used in ACPI 2.0*/ \
+ u8 prefer_PM_profile; /* Conveys preferred power management profile to OSPM. */ \
+ u16 sci_int; /* System vector of SCI interrupt */ \
+ u32 smi_cmd; /* Port address of SMI command port */ \
+ u8 acpi_enable; /* Value to write to smi_cmd to enable ACPI */ \
+ u8 acpi_disable; /* Value to write to smi_cmd to disable ACPI */ \
+ u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */ \
+ u8 pstate_cnt; /* Processor performance state control*/ \
+ u32 V1_pm1a_evt_blk; /* Port address of Power Mgt 1a Event Reg Blk */ \
+ u32 V1_pm1b_evt_blk; /* Port address of Power Mgt 1b Event Reg Blk */ \
+ u32 V1_pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */ \
+ u32 V1_pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */ \
+ u32 V1_pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */ \
+ u32 V1_pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */ \
+ u32 V1_gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */ \
+ u32 V1_gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */ \
+ u8 pm1_evt_len; /* Byte Length of ports at pm1_x_evt_blk */ \
+ u8 pm1_cnt_len; /* Byte Length of ports at pm1_x_cnt_blk */ \
+ u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */ \
+ u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */ \
+ u8 gpe0_blk_len; /* Byte Length of ports at gpe0_blk */ \
+ u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */ \
+ u8 gpe1_base; /* Offset in gpe model where gpe1 events start */ \
+ u8 cst_cnt; /* Support for the _CST object and C States change notification.*/ \
+ u16 plvl2_lat; /* Worst case HW latency to enter/exit C2 state */ \
+ u16 plvl3_lat; /* Worst case HW latency to enter/exit C3 state */ \
+ u16 flush_size; /* Processor's memory cache line width, in bytes */ \
+ u16 flush_stride; /* Number of flush strides that need to be read */ \
+ u8 duty_offset; /* Processor's duty cycle index in processor's P_CNT reg*/ \
+ u8 duty_width; /* Processor's duty cycle value bit width in P_CNT register.*/ \
+ u8 day_alrm; /* Index to day-of-month alarm in RTC CMOS RAM */ \
+ u8 mon_alrm; /* Index to month-of-year alarm in RTC CMOS RAM */ \
+ u8 century; /* Index to century in RTC CMOS RAM */ \
+ u16 iapc_boot_arch; /* IA-PC Boot Architecture Flags. See Table 5-10 for description*/ \
+ u8 reserved2; /* Reserved, must be zero */
+
+/*
+ * ACPI 2.0+ FADT
+ */
+struct fadt_descriptor {
+ ACPI_FADT_COMMON
+ /* Flags (32 bits) */
+ u8 wb_invd:1; /* 00: The wbinvd instruction works properly */
+ u8 wb_invd_flush:1; /* 01: The wbinvd flushes but does not invalidate */
+ u8 proc_c1:1; /* 02: All processors support C1 state */
+ u8 plvl2_up:1; /* 03: C2 state works on MP system */
+ u8 pwr_button:1; /* 04: Power button is handled as a generic feature */
+ u8 sleep_button:1; /* 05: Sleep button is handled as a generic feature, or not present */
+ u8 fixed_rTC:1; /* 06: RTC wakeup stat not in fixed register space */
+ u8 rtcs4:1; /* 07: RTC wakeup stat not possible from S4 */
+ u8 tmr_val_ext:1; /* 08: tmr_val width is 32 bits (0 means 24 bits) */
+ u8 dock_cap:1; /* 09: Docking supported */
+ u8 reset_reg_sup:1; /* 10: System reset via the FADT RESET_REG supported */
+ u8 sealed_case:1; /* 11: No internal expansion capabilities and case is sealed */
+ u8 headless:1; /* 12: No local video capabilities or local input devices */
+ u8 cpu_sw_sleep:1; /* 13: Must execute native instruction after writing SLP_TYPx register */
+
+ u8 pci_exp_wak:1; /* 14: System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */
+ u8 use_platform_clock:1; /* 15: OSPM should use platform-provided timer (ACPI 3.0) */
+ u8 S4rtc_sts_valid:1; /* 16: Contents of RTC_STS valid after S4 wake (ACPI 3.0) */
+ u8 remote_power_on_capable:1; /* 17: System is compatible with remote power on (ACPI 3.0) */
+ u8 force_apic_cluster_model:1; /* 18: All local APICs must use cluster model (ACPI 3.0) */
+ u8 force_apic_physical_destination_mode:1; /* 19: All local x_aPICs must use physical dest mode (ACPI 3.0) */
+ u8:4; /* 20-23: Reserved, must be zero */
+ u8 reserved3; /* 24-31: Reserved, must be zero */
+
+ struct acpi_generic_address reset_register; /* Reset register address in GAS format */
+ u8 reset_value; /* Value to write to the reset_register port to reset the system */
+ u8 reserved4[3]; /* These three bytes must be zero */
+ u64 xfirmware_ctrl; /* 64-bit physical address of FACS */
+ u64 Xdsdt; /* 64-bit physical address of DSDT */
+ struct acpi_generic_address xpm1a_evt_blk; /* Extended Power Mgt 1a acpi_event Reg Blk address */
+ struct acpi_generic_address xpm1b_evt_blk; /* Extended Power Mgt 1b acpi_event Reg Blk address */
+ struct acpi_generic_address xpm1a_cnt_blk; /* Extended Power Mgt 1a Control Reg Blk address */
+ struct acpi_generic_address xpm1b_cnt_blk; /* Extended Power Mgt 1b Control Reg Blk address */
+ struct acpi_generic_address xpm2_cnt_blk; /* Extended Power Mgt 2 Control Reg Blk address */
+ struct acpi_generic_address xpm_tmr_blk; /* Extended Power Mgt Timer Ctrl Reg Blk address */
+ struct acpi_generic_address xgpe0_blk; /* Extended General Purpose acpi_event 0 Reg Blk address */
+ struct acpi_generic_address xgpe1_blk; /* Extended General Purpose acpi_event 1 Reg Blk address */
+};
+
+/*
+ * "Down-revved" ACPI 2.0 FADT descriptor
+ * Defined here to allow compiler to generate the length of the struct
+ */
+struct fadt_descriptor_rev2_minus {
+ ACPI_FADT_COMMON u32 flags;
+ struct acpi_generic_address reset_register; /* Reset register address in GAS format */
+ u8 reset_value; /* Value to write to the reset_register port to reset the system. */
+ u8 reserved7[3]; /* Reserved, must be zero */
+};
+
+/*
+ * ACPI 1.0 FADT
+ * Defined here to allow compiler to generate the length of the struct
+ */
+struct fadt_descriptor_rev1 {
+ ACPI_FADT_COMMON u32 flags;
+};
+
+/* FADT: Preferred Power Management Profiles */
+
+#define PM_UNSPECIFIED 0
+#define PM_DESKTOP 1
+#define PM_MOBILE 2
+#define PM_WORKSTATION 3
+#define PM_ENTERPRISE_SERVER 4
+#define PM_SOHO_SERVER 5
+#define PM_APPLIANCE_PC 6
+
+/* FADT: Boot Arch Flags */
+
+#define BAF_LEGACY_DEVICES 0x0001
+#define BAF_8042_KEYBOARD_CONTROLLER 0x0002
+
+#define FADT2_REVISION_ID 3
+#define FADT2_MINUS_REVISION_ID 2
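+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the original
+ * header): roughly how the table revision maps onto the FADT layouts
+ * declared above -- revision 3+ corresponds to the full ACPI 2.0+
+ * fadt_descriptor, revision 2 to the "down-revved" layout, and anything
+ * older to the ACPI 1.0 layout. Real code would also cross-check the
+ * header length field.
+ */
+static inline u32 acpi_example_fadt_expected_length(u8 revision)
+{
+ if (revision >= FADT2_REVISION_ID)
+ return sizeof(struct fadt_descriptor);
+ if (revision == FADT2_MINUS_REVISION_ID)
+ return sizeof(struct fadt_descriptor_rev2_minus);
+ return sizeof(struct fadt_descriptor_rev1);
+}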
+
+/* Reset to default packing */
+
+#pragma pack()
+
+/*
+ * This macro is temporary until the table bitfield flag definitions
+ * are removed and replaced by a Flags field.
+ */
+#define ACPI_FLAG_OFFSET(d,f,o) (u8) (ACPI_OFFSET (d,f) + \
+ sizeof(((d *)0)->f) + o)
+/*
+ * Get the remaining ACPI tables
+ */
+#include "actbl1.h"
+
+/*
+ * ACPI Table information. We save the table address, length,
+ * and type of memory allocation (mapped or allocated) for each
+ * table for 1) when we exit, and 2) if a new table is installed
+ */
+#define ACPI_MEM_NOT_ALLOCATED 0
+#define ACPI_MEM_ALLOCATED 1
+#define ACPI_MEM_MAPPED 2
+
+/* Definitions for the Flags bitfield member of struct acpi_table_support */
+
+#define ACPI_TABLE_SINGLE 0x00
+#define ACPI_TABLE_MULTIPLE 0x01
+#define ACPI_TABLE_EXECUTABLE 0x02
+
+#define ACPI_TABLE_ROOT 0x00
+#define ACPI_TABLE_PRIMARY 0x10
+#define ACPI_TABLE_SECONDARY 0x20
+#define ACPI_TABLE_ALL 0x30
+#define ACPI_TABLE_TYPE_MASK 0x30
+
+/* Data about each known table type */
+
+struct acpi_table_support {
+ char *name;
+ char *signature;
+ void **global_ptr;
+ u8 sig_length;
+ u8 flags;
+};
+
+extern u8 acpi_fadt_is_v1; /* is set to 1 if FADT is revision 1,
+ * needed for certain workarounds */
+/* Macros used to generate offsets to specific table fields */
+
+#define ACPI_FACS_OFFSET(f) (u8) ACPI_OFFSET (struct facs_descriptor,f)
+#define ACPI_FADT_OFFSET(f) (u8) ACPI_OFFSET (struct fadt_descriptor, f)
+#define ACPI_GAS_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_generic_address,f)
+#define ACPI_HDR_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_header,f)
+#define ACPI_RSDP_OFFSET(f) (u8) ACPI_OFFSET (struct rsdp_descriptor,f)
+
+#define ACPI_FADT_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct fadt_descriptor,f,o)
+#define ACPI_FACS_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct facs_descriptor,f,o)
+
+#endif /* __ACTBL_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: actbl1.h - Additional ACPI table definitions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACTBL1_H__
+#define __ACTBL1_H__
+
+/*******************************************************************************
+ *
+ * Additional ACPI Tables
+ *
+ * These tables are not consumed directly by the ACPICA subsystem, but are
+ * included here to support device drivers and the AML disassembler.
+ *
+ ******************************************************************************/
+
+/*
+ * Values for description table header signatures. Useful because they make
+ * it more difficult to inadvertently type in the wrong signature.
+ */
+#define ACPI_SIG_ASF "ASF!" /* Alert Standard Format table */
+#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */
+#define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */
+#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */
+#define ACPI_SIG_ECDT "ECDT" /* Embedded Controller Boot Resources Table */
+#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */
+#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */
+#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */
+#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
+#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */
+#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */
+#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */
+#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */
+#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */
+#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */
+
+/* Legacy names */
+
+#define APIC_SIG "APIC" /* Multiple APIC Description Table */
+#define BOOT_SIG "BOOT" /* Simple Boot Flag Table */
+#define SBST_SIG "SBST" /* Smart Battery Specification Table */
+
+/*
+ * All tables must be byte-packed to match the ACPI specification, since
+ * the tables are provided by the system BIOS.
+ */
+#pragma pack(1)
+
+/*
+ * Note about bitfields: The u8 type is used for bitfields in ACPI tables.
+ * This is the only type that is even remotely portable. Anything else is not
+ * portable, so do not use any other bitfield types.
+ */
+
+/*******************************************************************************
+ *
+ * ASF - Alert Standard Format table (Signature "ASF!")
+ *
+ ******************************************************************************/
+
+struct acpi_table_asf {
+ACPI_TABLE_HEADER_DEF};
+
+#define ACPI_ASF_HEADER_DEF \
+ u8 type; \
+ u8 reserved; \
+ u16 length;
+
+struct acpi_asf_header {
+ACPI_ASF_HEADER_DEF};
+
+/* Values for Type field */
+
+#define ASF_INFO 0
+#define ASF_ALERT 1
+#define ASF_CONTROL 2
+#define ASF_BOOT 3
+#define ASF_ADDRESS 4
+#define ASF_RESERVED 5
+
+/*
+ * ASF subtables
+ */
+
+/* 0: ASF Information */
+
+struct acpi_asf_info {
+ ACPI_ASF_HEADER_DEF u8 min_reset_value;
+ u8 min_poll_interval;
+ u16 system_id;
+ u32 mfg_id;
+ u8 flags;
+ u8 reserved2[3];
+};
+
+/* 1: ASF Alerts */
+
+struct acpi_asf_alert {
+ ACPI_ASF_HEADER_DEF u8 assert_mask;
+ u8 deassert_mask;
+ u8 alerts;
+ u8 data_length;
+ u8 array[1];
+};
+
+/* 2: ASF Remote Control */
+
+struct acpi_asf_remote {
+ ACPI_ASF_HEADER_DEF u8 controls;
+ u8 data_length;
+ u16 reserved2;
+ u8 array[1];
+};
+
+/* 3: ASF RMCP Boot Options */
+
+struct acpi_asf_rmcp {
+ ACPI_ASF_HEADER_DEF u8 capabilities[7];
+ u8 completion_code;
+ u32 enterprise_id;
+ u8 command;
+ u16 parameter;
+ u16 boot_options;
+ u16 oem_parameters;
+};
+
+/* 4: ASF Address */
+
+struct acpi_asf_address {
+ ACPI_ASF_HEADER_DEF u8 eprom_address;
+ u8 devices;
+ u8 smbus_addresses[1];
+};
+
+/*******************************************************************************
+ *
+ * BOOT - Simple Boot Flag Table
+ *
+ ******************************************************************************/
+
+struct acpi_table_boot {
+ ACPI_TABLE_HEADER_DEF u8 cmos_index; /* Index in CMOS RAM for the boot register */
+ u8 reserved[3];
+};
+
+/*******************************************************************************
+ *
+ * CPEP - Corrected Platform Error Polling table
+ *
+ ******************************************************************************/
+
+struct acpi_table_cpep {
+ ACPI_TABLE_HEADER_DEF u64 reserved;
+};
+
+/* Subtable */
+
+struct acpi_cpep_polling {
+ u8 type;
+ u8 length;
+ u8 processor_id; /* Processor ID */
+ u8 processor_eid; /* Processor EID */
+ u32 polling_interval; /* Polling interval (msec) */
+};
+
+/*******************************************************************************
+ *
+ * DBGP - Debug Port table
+ *
+ ******************************************************************************/
+
+struct acpi_table_dbgp {
+ ACPI_TABLE_HEADER_DEF u8 interface_type; /* 0=full 16550, 1=subset of 16550 */
+ u8 reserved[3];
+ struct acpi_generic_address debug_port;
+};
+
+/*******************************************************************************
+ *
+ * ECDT - Embedded Controller Boot Resources Table
+ *
+ ******************************************************************************/
+
+struct ec_boot_resources {
+ ACPI_TABLE_HEADER_DEF struct acpi_generic_address ec_control; /* Address of EC command/status register */
+ struct acpi_generic_address ec_data; /* Address of EC data register */
+ u32 uid; /* Unique ID - must be same as the EC _UID method */
+ u8 gpe_bit; /* The GPE for the EC */
+ u8 ec_id[1]; /* Full namepath of the EC in the ACPI namespace */
+};
+
+/*******************************************************************************
+ *
+ * HPET - High Precision Event Timer table
+ *
+ ******************************************************************************/
+
+struct acpi_hpet_table {
+ ACPI_TABLE_HEADER_DEF u32 hardware_id; /* Hardware ID of event timer block */
+ struct acpi_generic_address base_address; /* Address of event timer block */
+ u8 hpet_number; /* HPET sequence number */
+ u16 clock_tick; /* Main counter min tick, periodic mode */
+ u8 attributes;
+};
+
+#if 0 /* HPET flags to be converted to macros */
+struct { /* Flags (8 bits) */
+ u8 page_protect:1; /* 00: No page protection */
+ u8 page_protect4:1; /* 01: 4_kB page protected */
+ u8 page_protect64:1; /* 02: 64_kB page protected */
+ u8:5; /* 03-07: Reserved, must be zero */
+} flags;
+#endif
+
+/*******************************************************************************
+ *
+ * MADT - Multiple APIC Description Table
+ *
+ ******************************************************************************/
+
+struct multiple_apic_table {
+ ACPI_TABLE_HEADER_DEF u32 local_apic_address; /* Physical address of local APIC */
+
+ /* Flags (32 bits) */
+
+ u8 PCATcompat:1; /* 00: System also has dual 8259s */
+ u8:7; /* 01-07: Reserved, must be zero */
+ u8 reserved1[3]; /* 08-31: Reserved, must be zero */
+};
+
+/* Values for MADT PCATCompat */
+
+#define DUAL_PIC 0
+#define MULTIPLE_APIC 1
+
+/* Common MADT Sub-table header */
+
+#define APIC_HEADER_DEF \
+ u8 type; \
+ u8 length;
+
+struct apic_header {
+APIC_HEADER_DEF};
+
+/* Values for Type in struct apic_header */
+
+#define APIC_PROCESSOR 0
+#define APIC_IO 1
+#define APIC_XRUPT_OVERRIDE 2
+#define APIC_NMI 3
+#define APIC_LOCAL_NMI 4
+#define APIC_ADDRESS_OVERRIDE 5
+#define APIC_IO_SAPIC 6
+#define APIC_LOCAL_SAPIC 7
+#define APIC_XRUPT_SOURCE 8
+#define APIC_RESERVED 9 /* 9 and greater are reserved */
+
+/* Flag definitions for MADT sub-tables */
+
+#define ACPI_MADT_IFLAGS /* INTI flags (16 bits) */ \
+ u8 polarity : 2; /* 00-01: Polarity of APIC I/O input signals */\
+ u8 trigger_mode : 2; /* 02-03: Trigger mode of APIC input signals */\
+ u8 : 4; /* 04-07: Reserved, must be zero */\
+ u8 reserved1; /* 08-15: Reserved, must be zero */
+
+#define ACPI_MADT_LFLAGS /* Local Sapic flags (32 bits) */ \
+ u8 processor_enabled: 1; /* 00: Processor is usable if set */\
+ u8 : 7; /* 01-07: Reserved, must be zero */\
+ u8 reserved2[3]; /* 08-31: Reserved, must be zero */
+
+/* Values for MPS INTI flags */
+
+#define POLARITY_CONFORMS 0
+#define POLARITY_ACTIVE_HIGH 1
+#define POLARITY_RESERVED 2
+#define POLARITY_ACTIVE_LOW 3
+
+#define TRIGGER_CONFORMS 0
+#define TRIGGER_EDGE 1
+#define TRIGGER_RESERVED 2
+#define TRIGGER_LEVEL 3
+
+/*
+ * MADT Sub-tables, correspond to Type in struct apic_header
+ */
+
+/* 0: processor APIC */
+
+struct madt_processor_apic {
+ APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */
+ u8 local_apic_id; /* Processor's local APIC id */
+ ACPI_MADT_LFLAGS};
+
+/* 1: IO APIC */
+
+struct madt_io_apic {
+ APIC_HEADER_DEF u8 io_apic_id; /* I/O APIC ID */
+ u8 reserved; /* Reserved - must be zero */
+ u32 address; /* APIC physical address */
+ u32 interrupt; /* Global system interrupt where INTI lines start */
+};
+
+/* 2: Interrupt Override */
+
+struct madt_interrupt_override {
+ APIC_HEADER_DEF u8 bus; /* 0 - ISA */
+ u8 source; /* Interrupt source (IRQ) */
+ u32 interrupt; /* Global system interrupt */
+ ACPI_MADT_IFLAGS};
+
+/* 3: NMI Sources */
+
+struct madt_nmi_source {
+ APIC_HEADER_DEF ACPI_MADT_IFLAGS u32 interrupt; /* Global system interrupt */
+};
+
+/* 4: Local APIC NMI */
+
+struct madt_local_apic_nmi {
+ APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */
+ ACPI_MADT_IFLAGS u8 lint; /* LINTn to which NMI is connected */
+};
+
+/* 5: Address Override */
+
+struct madt_address_override {
+ APIC_HEADER_DEF u16 reserved; /* Reserved, must be zero */
+ u64 address; /* APIC physical address */
+};
+
+/* 6: I/O Sapic */
+
+struct madt_io_sapic {
+ APIC_HEADER_DEF u8 io_sapic_id; /* I/O SAPIC ID */
+ u8 reserved; /* Reserved, must be zero */
+ u32 interrupt_base; /* Global interrupt for SAPIC start */
+ u64 address; /* SAPIC physical address */
+};
+
+/* 7: Local Sapic */
+
+struct madt_local_sapic {
+ APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */
+ u8 local_sapic_id; /* SAPIC ID */
+ u8 local_sapic_eid; /* SAPIC EID */
+ u8 reserved[3]; /* Reserved, must be zero */
+ ACPI_MADT_LFLAGS u32 processor_uID; /* Numeric UID - ACPI 3.0 */
+ char processor_uIDstring[1]; /* String UID - ACPI 3.0 */
+};
+
+/* 8: Platform Interrupt Source */
+
+struct madt_interrupt_source {
+ APIC_HEADER_DEF ACPI_MADT_IFLAGS u8 interrupt_type; /* 1=PMI, 2=INIT, 3=corrected */
+ u8 processor_id; /* Processor ID */
+ u8 processor_eid; /* Processor EID */
+ u8 io_sapic_vector; /* Vector value for PMI interrupts */
+ u32 interrupt; /* Global system interrupt */
+ u32 flags; /* Interrupt Source Flags */
+};
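+
+/*
+ * Illustrative sketch (hypothetical walker, not part of the original
+ * header): MADT sub-tables are packed back to back after the fixed
+ * multiple_apic_table header, each beginning with the common type/length
+ * pair from APIC_HEADER_DEF, so a consumer advances by 'length' bytes
+ * per entry until the table's total length is reached.
+ */
+static inline void acpi_example_walk_madt(const struct multiple_apic_table *madt,
+ void (*visit)(const struct apic_header *))
+{
+ const u8 *p = (const u8 *) madt + sizeof(struct multiple_apic_table);
+ const u8 *end = (const u8 *) madt + madt->length;
+
+ while (p + sizeof(struct apic_header) <= end) {
+ const struct apic_header *entry = (const struct apic_header *) p;
+
+ if (entry->length < sizeof(struct apic_header))
+ break; /* Malformed entry; stop walking */
+
+ visit(entry);
+ p += entry->length;
+ }
+}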
+
+#ifdef DUPLICATE_DEFINITION_WITH_LINUX_ACPI_H
+/*******************************************************************************
+ *
+ * MCFG - PCI Memory Mapped Configuration table and sub-table
+ *
+ ******************************************************************************/
+
+struct acpi_table_mcfg {
+ ACPI_TABLE_HEADER_DEF u8 reserved[8];
+};
+
+struct acpi_mcfg_allocation {
+ u64 base_address; /* Base address, processor-relative */
+ u16 pci_segment; /* PCI segment group number */
+ u8 start_bus_number; /* Starting PCI Bus number */
+ u8 end_bus_number; /* Final PCI Bus number */
+ u32 reserved;
+};
+#endif
+
+/*******************************************************************************
+ *
+ * SBST - Smart Battery Specification Table
+ *
+ ******************************************************************************/
+
+struct smart_battery_table {
+ ACPI_TABLE_HEADER_DEF u32 warning_level;
+ u32 low_level;
+ u32 critical_level;
+};
+
+/*******************************************************************************
+ *
+ * SLIT - System Locality Distance Information Table
+ *
+ ******************************************************************************/
+
+struct system_locality_info {
+ ACPI_TABLE_HEADER_DEF u64 locality_count;
+ u8 entry[1][1];
+};
+
+/*******************************************************************************
+ *
+ * SPCR - Serial Port Console Redirection table
+ *
+ ******************************************************************************/
+
+struct acpi_table_spcr {
+ ACPI_TABLE_HEADER_DEF u8 interface_type; /* 0=full 16550, 1=subset of 16550 */
+ u8 reserved[3];
+ struct acpi_generic_address serial_port;
+ u8 interrupt_type;
+ u8 pc_interrupt;
+ u32 interrupt;
+ u8 baud_rate;
+ u8 parity;
+ u8 stop_bits;
+ u8 flow_control;
+ u8 terminal_type;
+ u8 reserved2;
+ u16 pci_device_id;
+ u16 pci_vendor_id;
+ u8 pci_bus;
+ u8 pci_device;
+ u8 pci_function;
+ u32 pci_flags;
+ u8 pci_segment;
+ u32 reserved3;
+};
+
+/*******************************************************************************
+ *
+ * SPMI - Server Platform Management Interface table
+ *
+ ******************************************************************************/
+
+struct acpi_table_spmi {
+ ACPI_TABLE_HEADER_DEF u8 reserved;
+ u8 interface_type;
+ u16 spec_revision; /* Version of IPMI */
+ u8 interrupt_type;
+ u8 gpe_number; /* GPE assigned */
+ u8 reserved2;
+ u8 pci_device_flag;
+ u32 interrupt;
+ struct acpi_generic_address ipmi_register;
+ u8 pci_segment;
+ u8 pci_bus;
+ u8 pci_device;
+ u8 pci_function;
+};
+
+/*******************************************************************************
+ *
+ * SRAT - System Resource Affinity Table
+ *
+ ******************************************************************************/
+
+struct system_resource_affinity {
+ ACPI_TABLE_HEADER_DEF u32 reserved1; /* Must be value '1' */
+ u64 reserved2; /* Reserved, must be zero */
+};
+
+/* SRAT common sub-table header */
+
+#define SRAT_SUBTABLE_HEADER \
+ u8 type; \
+ u8 length;
+
+/* Values for Type above */
+
+#define SRAT_CPU_AFFINITY 0
+#define SRAT_MEMORY_AFFINITY 1
+#define SRAT_RESERVED 2
+
+/* SRAT sub-tables */
+
+struct static_resource_alloc {
+ SRAT_SUBTABLE_HEADER u8 proximity_domain_lo;
+ u8 apic_id;
+
+ /* Flags (32 bits) */
+
+ u8 enabled:1; /* 00: Use affinity structure */
+ u8:7; /* 01-07: Reserved, must be zero */
+ u8 reserved3[3]; /* 08-31: Reserved, must be zero */
+
+ u8 local_sapic_eid;
+ u8 proximity_domain_hi[3];
+ u32 reserved4; /* Reserved, must be zero */
+};
+
+struct memory_affinity {
+ SRAT_SUBTABLE_HEADER u32 proximity_domain;
+ u16 reserved3;
+ u64 base_address;
+ u64 address_length;
+ u32 reserved4;
+
+ /* Flags (32 bits) */
+
+ u8 enabled:1; /* 00: Use affinity structure */
+ u8 hot_pluggable:1; /* 01: Memory region is hot pluggable */
+ u8 non_volatile:1; /* 02: Memory is non-volatile */
+ u8:5; /* 03-07: Reserved, must be zero */
+ u8 reserved5[3]; /* 08-31: Reserved, must be zero */
+
+ u64 reserved6; /* Reserved, must be zero */
+};
+
+/*******************************************************************************
+ *
+ * TCPA - Trusted Computing Platform Alliance table
+ *
+ ******************************************************************************/
+
+struct acpi_table_tcpa {
+ ACPI_TABLE_HEADER_DEF u16 reserved;
+ u32 max_log_length; /* Maximum length for the event log area */
+ u64 log_address; /* Address of the event log area */
+};
+
+/*******************************************************************************
+ *
+ * WDRT - Watchdog Resource Table
+ *
+ ******************************************************************************/
+
+struct acpi_table_wdrt {
+ ACPI_TABLE_HEADER_DEF u32 header_length; /* Watchdog Header Length */
+ u8 pci_segment; /* PCI Segment number */
+ u8 pci_bus; /* PCI Bus number */
+ u8 pci_device; /* PCI Device number */
+ u8 pci_function; /* PCI Function number */
+ u32 timer_period; /* Period of one timer count (msec) */
+ u32 max_count; /* Maximum counter value supported */
+ u32 min_count; /* Minimum counter value */
+ u8 flags;
+ u8 reserved[3];
+ u32 entries; /* Number of watchdog entries that follow */
+};
+
+#if 0 /* Flags, will be converted to macros */
+u8 enabled:1; /* 00: Timer enabled */
+u8:6; /* 01-06: Reserved */
+u8 sleep_stop:1; /* 07: Timer stopped in sleep state */
+#endif
+
+/* Macros used to generate offsets to specific table fields */
+
+#define ACPI_ASF0_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_info,f)
+#define ACPI_ASF1_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_alert,f)
+#define ACPI_ASF2_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_remote,f)
+#define ACPI_ASF3_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_rmcp,f)
+#define ACPI_ASF4_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_address,f)
+#define ACPI_BOOT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_boot,f)
+#define ACPI_CPEP_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_cpep,f)
+#define ACPI_CPEP0_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_cpep_polling,f)
+#define ACPI_DBGP_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_dbgp,f)
+#define ACPI_ECDT_OFFSET(f) (u8) ACPI_OFFSET (struct ec_boot_resources,f)
+#define ACPI_HPET_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_hpet_table,f)
+#define ACPI_MADT_OFFSET(f) (u8) ACPI_OFFSET (struct multiple_apic_table,f)
+#define ACPI_MADT0_OFFSET(f) (u8) ACPI_OFFSET (struct madt_processor_apic,f)
+#define ACPI_MADT1_OFFSET(f) (u8) ACPI_OFFSET (struct madt_io_apic,f)
+#define ACPI_MADT2_OFFSET(f) (u8) ACPI_OFFSET (struct madt_interrupt_override,f)
+#define ACPI_MADT3_OFFSET(f) (u8) ACPI_OFFSET (struct madt_nmi_source,f)
+#define ACPI_MADT4_OFFSET(f) (u8) ACPI_OFFSET (struct madt_local_apic_nmi,f)
+#define ACPI_MADT5_OFFSET(f) (u8) ACPI_OFFSET (struct madt_address_override,f)
+#define ACPI_MADT6_OFFSET(f) (u8) ACPI_OFFSET (struct madt_io_sapic,f)
+#define ACPI_MADT7_OFFSET(f) (u8) ACPI_OFFSET (struct madt_local_sapic,f)
+#define ACPI_MADT8_OFFSET(f) (u8) ACPI_OFFSET (struct madt_interrupt_source,f)
+#define ACPI_MADTH_OFFSET(f) (u8) ACPI_OFFSET (struct apic_header,f)
+#define ACPI_MCFG_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_mcfg,f)
+#define ACPI_MCFG0_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_mcfg_allocation,f)
+#define ACPI_SBST_OFFSET(f) (u8) ACPI_OFFSET (struct smart_battery_table,f)
+#define ACPI_SLIT_OFFSET(f) (u8) ACPI_OFFSET (struct system_locality_info,f)
+#define ACPI_SPCR_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_spcr,f)
+#define ACPI_SPMI_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_spmi,f)
+#define ACPI_SRAT_OFFSET(f) (u8) ACPI_OFFSET (struct system_resource_affinity,f)
+#define ACPI_SRAT0_OFFSET(f) (u8) ACPI_OFFSET (struct static_resource_alloc,f)
+#define ACPI_SRAT1_OFFSET(f) (u8) ACPI_OFFSET (struct memory_affinity,f)
+#define ACPI_TCPA_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_tcpa,f)
+#define ACPI_WDRT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_wdrt,f)
+
+#define ACPI_HPET_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct acpi_hpet_table,f,o)
+#define ACPI_SRAT0_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct static_resource_alloc,f,o)
+#define ACPI_SRAT1_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct memory_affinity,f,o)
+#define ACPI_MADT_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct multiple_apic_table,f,o)
+#define ACPI_MADT0_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_processor_apic,f,o)
+#define ACPI_MADT2_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_interrupt_override,f,o)
+#define ACPI_MADT3_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_nmi_source,f,o)
+#define ACPI_MADT4_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_local_apic_nmi,f,o)
+#define ACPI_MADT7_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_local_sapic,f,o)
+#define ACPI_MADT8_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_interrupt_source,f,o)
+
+/* Reset to default packing */
+
+#pragma pack()
+
+#endif /* __ACTBL1_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: actbl2.h - ACPI Specification Revision 2.0 Tables
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACTBL2_H__
+#define __ACTBL2_H__
+
+/* Code moved to both actbl.h and actbl1.h */
+
+#endif /* __ACTBL2_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: actbl71.h - IA-64 Extensions to the ACPI Spec Rev. 0.71
+ * This file includes tables specific to this
+ * specification revision.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2003, R. Byron Moore
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ACTBL71_H__
+#define __ACTBL71_H__
+
+/* 0.71 FADT address_space data item bitmasks defines */
+/* If the associated bit is zero, the block is in memory space; otherwise it is in I/O space */
+
+#define SMI_CMD_ADDRESS_SPACE 0x01
+#define PM1_BLK_ADDRESS_SPACE 0x02
+#define PM2_CNT_BLK_ADDRESS_SPACE 0x04
+#define PM_TMR_BLK_ADDRESS_SPACE 0x08
+#define GPE0_BLK_ADDRESS_SPACE 0x10
+#define GPE1_BLK_ADDRESS_SPACE 0x20
+
+/* Only for clarity in declarations */
+
+typedef u64 IO_ADDRESS;
+
+#pragma pack(1)
+struct { /* Root System Descriptor Pointer */
+ NATIVE_CHAR signature[8]; /* contains "RSD PTR " */
+ u8 checksum; /* to make sum of struct == 0 */
+ NATIVE_CHAR oem_id[6]; /* OEM identification */
+ u8 reserved; /* Must be 0 for 1.0, 2 for 2.0 */
+ u64 rsdt_physical_address; /* 64-bit physical address of RSDT */
+};
+
+/*****************************************/
+/* IA64 Extensions to ACPI Spec Rev 0.71 */
+/* for the Root System Description Table */
+/*****************************************/
+struct {
+ struct acpi_table_header header; /* Table header */
+ u32 reserved_pad; /* IA64 alignment, must be 0 */
+ u64 table_offset_entry[1]; /* Array of pointers to other */
+ /* tables' headers */
+};
+
+/*******************************************/
+/* IA64 Extensions to ACPI Spec Rev 0.71 */
+/* for the Firmware ACPI Control Structure */
+/*******************************************/
+struct {
+ NATIVE_CHAR signature[4]; /* signature "FACS" */
+ u32 length; /* length of structure, in bytes */
+ u32 hardware_signature; /* hardware configuration signature */
+ u32 reserved4; /* must be 0 */
+ u64 firmware_waking_vector; /* ACPI OS waking vector */
+ u64 global_lock; /* Global Lock */
+ u32 S4bios_f:1; /* Indicates if S4BIOS support is present */
+ u32 reserved1:31; /* must be 0 */
+ u8 reserved3[28]; /* reserved - must be zero */
+};
+
+/******************************************/
+/* IA64 Extensions to ACPI Spec Rev 0.71 */
+/* for the Fixed ACPI Description Table */
+/******************************************/
+struct {
+ struct acpi_table_header header; /* table header */
+ u32 reserved_pad; /* IA64 alignment, must be 0 */
+ u64 firmware_ctrl; /* 64-bit Physical address of FACS */
+ u64 dsdt; /* 64-bit Physical address of DSDT */
+ u8 model; /* System Interrupt Model */
+ u8 address_space; /* Address Space Bitmask */
+ u16 sci_int; /* System vector of SCI interrupt */
+ u8 acpi_enable; /* value to write to smi_cmd to enable ACPI */
+ u8 acpi_disable; /* value to write to smi_cmd to disable ACPI */
+ u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */
+ u8 reserved2; /* reserved - must be zero */
+ u64 smi_cmd; /* Port address of SMI command port */
+ u64 pm1a_evt_blk; /* Port address of Power Mgt 1a acpi_event Reg Blk */
+ u64 pm1b_evt_blk; /* Port address of Power Mgt 1b acpi_event Reg Blk */
+ u64 pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */
+ u64 pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */
+ u64 pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */
+ u64 pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */
+ u64 gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */
+ u64 gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */
+ u8 pm1_evt_len; /* Byte length of ports at pm1_x_evt_blk */
+ u8 pm1_cnt_len; /* Byte length of ports at pm1_x_cnt_blk */
+ u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */
+ u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */
+ u8 gpe0_blk_len; /* Byte Length of ports at gpe0_blk */
+ u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */
+ u8 gpe1_base; /* offset in gpe model where gpe1 events start */
+ u8 reserved3; /* reserved */
+ u16 plvl2_lat; /* worst case HW latency to enter/exit C2 state */
+ u16 plvl3_lat; /* worst case HW latency to enter/exit C3 state */
+ u8 day_alrm; /* index to day-of-month alarm in RTC CMOS RAM */
+ u8 mon_alrm; /* index to month-of-year alarm in RTC CMOS RAM */
+ u8 century; /* index to century in RTC CMOS RAM */
+ u8 reserved4; /* reserved */
+ u32 flush_cash:1; /* PAL_FLUSH_CACHE is correctly supported */
+ u32 reserved5:1; /* reserved - must be zero */
+ u32 proc_c1:1; /* all processors support C1 state */
+ u32 plvl2_up:1; /* C2 state works on MP system */
+ u32 pwr_button:1; /* Power button is handled as a generic feature */
+ u32 sleep_button:1; /* Sleep button is handled as a generic feature, or not present */
+ u32 fixed_rTC:1; /* RTC wakeup stat not in fixed register space */
+ u32 rtcs4:1; /* RTC wakeup stat not possible from S4 */
+ u32 tmr_val_ext:1; /* tmr_val is 32 bits */
+ u32 dock_cap:1; /* Supports Docking */
+ u32 reserved6:22; /* reserved - must be zero */
+};
+
+#pragma pack()
+
+#endif /* __ACTBL71_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: actypes.h - Common data types for the entire ACPI subsystem
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACTYPES_H__
+#define __ACTYPES_H__
+
+/* acpisrc:struct_defs -- for acpisrc conversion */
+
+/*
+ * ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent header
+ * and must be either 16, 32, or 64
+ */
+#ifndef ACPI_MACHINE_WIDTH
+#error ACPI_MACHINE_WIDTH not defined
+#endif
+
+/*! [Begin] no source code translation */
+
+/*
+ * Data type ranges
+ * Note: These macros are designed to be compiler independent as well as
+ * working around problems that some 32-bit compilers have with 64-bit
+ * constants.
+ */
+#define ACPI_UINT8_MAX (UINT8) (~((UINT8) 0)) /* 0xFF */
+#define ACPI_UINT16_MAX (UINT16)(~((UINT16) 0)) /* 0xFFFF */
+#define ACPI_UINT32_MAX (UINT32)(~((UINT32) 0)) /* 0xFFFFFFFF */
+#define ACPI_UINT64_MAX (UINT64)(~((UINT64) 0)) /* 0xFFFFFFFFFFFFFFFF */
+#define ACPI_ASCII_MAX 0x7F
+
+/*
+ * Architecture-specific ACPICA Subsystem Data Types
+ *
+ * The goal of these types is to provide source code portability across
+ * 16-bit, 32-bit, and 64-bit targets.
+ *
+ * 1) The following types are of fixed size for all targets (16/32/64):
+ *
+ * BOOLEAN Logical boolean
+ *
+ * UINT8 8-bit (1 byte) unsigned value
+ * UINT16 16-bit (2 byte) unsigned value
+ * UINT32 32-bit (4 byte) unsigned value
+ * UINT64 64-bit (8 byte) unsigned value
+ *
+ * INT16 16-bit (2 byte) signed value
+ * INT32 32-bit (4 byte) signed value
+ * INT64 64-bit (8 byte) signed value
+ *
+ * COMPILER_DEPENDENT_UINT64/INT64 - These types are defined in the
+ * compiler-dependent header(s) and were introduced because there is no common
+ * 64-bit integer type across the various compilation models, as shown in
+ * the table below.
+ *
+ * Datatype LP64 ILP64 LLP64 ILP32 LP32 16bit
+ * char 8 8 8 8 8 8
+ * short 16 16 16 16 16 16
+ * _int32 32
+ * int 32 64 32 32 16 16
+ * long 64 64 32 32 32 32
+ * long long 64 64
+ * pointer 64 64 64 32 32 32
+ *
+ * Note: ILP64 and LP32 are currently not supported.
+ *
+ *
+ * 2) These types represent the native word size of the target mode of the
+ * processor, and may be 16-bit, 32-bit, or 64-bit as required. They are
+ * usually used for memory allocation, efficient loop counters, and array
+ * indexes. The types are similar to the size_t type in the C library and are
+ * required because there is no C type that consistently represents the native
+ * data width.
+ *
+ * ACPI_SIZE 16/32/64-bit unsigned value
+ * ACPI_NATIVE_UINT 16/32/64-bit unsigned value
+ * ACPI_NATIVE_INT 16/32/64-bit signed value
+ *
+ */
+
+/*******************************************************************************
+ *
+ * Common types for all compilers, all targets
+ *
+ ******************************************************************************/
+
+typedef unsigned char BOOLEAN;
+typedef unsigned char UINT8;
+typedef unsigned short UINT16;
+typedef COMPILER_DEPENDENT_UINT64 UINT64;
+typedef COMPILER_DEPENDENT_INT64 INT64;
+
+/*! [End] no source code translation !*/
+
+/*******************************************************************************
+ *
+ * Types specific to 64-bit targets
+ *
+ ******************************************************************************/
+
+#if ACPI_MACHINE_WIDTH == 64
+
+/*! [Begin] no source code translation (keep the typedefs as-is) */
+
+typedef unsigned int UINT32;
+typedef int INT32;
+
+/*! [End] no source code translation !*/
+
+typedef u64 acpi_native_uint;
+typedef s64 acpi_native_int;
+
+typedef u64 acpi_table_ptr;
+typedef u64 acpi_io_address;
+typedef u64 acpi_physical_address;
+
+#define ACPI_MAX_PTR ACPI_UINT64_MAX
+#define ACPI_SIZE_MAX ACPI_UINT64_MAX
+
+#define ACPI_USE_NATIVE_DIVIDE /* Has native 64-bit integer support */
+
+/*
+ * In the case of the Itanium Processor Family (IPF), the hardware does not
+ * support misaligned memory transfers. Set the MISALIGNMENT_NOT_SUPPORTED flag
+ * to indicate that special precautions must be taken to avoid alignment faults.
+ * (IA64 or ia64 is currently used by existing compilers to indicate IPF.)
+ *
+ * Note: EM64T and other x86-64 processors support misaligned transfers,
+ * so there is no need to define this flag.
+ */
+#if defined (__IA64__) || defined (__ia64__)
+#define ACPI_MISALIGNMENT_NOT_SUPPORTED
+#endif
+
+/*******************************************************************************
+ *
+ * Types specific to 32-bit targets
+ *
+ ******************************************************************************/
+
+#elif ACPI_MACHINE_WIDTH == 32
+
+/*! [Begin] no source code translation (keep the typedefs as-is) */
+
+typedef unsigned int UINT32;
+typedef int INT32;
+
+/*! [End] no source code translation !*/
+
+typedef u32 acpi_native_uint;
+typedef s32 acpi_native_int;
+
+typedef u64 acpi_table_ptr;
+typedef u32 acpi_io_address;
+typedef u64 acpi_physical_address;
+
+#define ACPI_MAX_PTR ACPI_UINT32_MAX
+#define ACPI_SIZE_MAX ACPI_UINT32_MAX
+
+/*******************************************************************************
+ *
+ * Types specific to 16-bit targets
+ *
+ ******************************************************************************/
+
+#elif ACPI_MACHINE_WIDTH == 16
+
+/*! [Begin] no source code translation (keep the typedefs as-is) */
+
+typedef unsigned long UINT32;
+typedef short INT16;
+typedef long INT32;
+
+/*! [End] no source code translation !*/
+
+typedef u16 acpi_native_uint;
+typedef s16 acpi_native_int;
+
+typedef u32 acpi_table_ptr;
+typedef u32 acpi_io_address;
+typedef char *acpi_physical_address;
+
+#define ACPI_MAX_PTR ACPI_UINT16_MAX
+#define ACPI_SIZE_MAX ACPI_UINT16_MAX
+
+#define ACPI_USE_NATIVE_DIVIDE /* No 64-bit integers, ok to use native divide */
+
+/* 64-bit integers cannot be supported */
+
+#define ACPI_NO_INTEGER64_SUPPORT
+
+#else
+
+/* ACPI_MACHINE_WIDTH must be either 64, 32, or 16 */
+
+#error unknown ACPI_MACHINE_WIDTH
+#endif
+
+/* Variable-width type, used instead of clib size_t */
+
+typedef acpi_native_uint acpi_size;
+
+/*******************************************************************************
+ *
+ * OS-dependent and compiler-dependent types
+ *
+ * If the defaults below are not appropriate for the host system, they can
+ * be defined in the compiler-specific or OS-specific header, and this will
+ * take precedence.
+ *
+ ******************************************************************************/
+
+/* Value returned by acpi_os_get_thread_id */
+
+#ifndef acpi_thread_id
+#define acpi_thread_id acpi_native_uint
+#endif
+
+/* Object returned from acpi_os_create_lock */
+
+#ifndef acpi_spinlock
+#define acpi_spinlock void *
+#endif
+
+/* Flags for acpi_os_acquire_lock/acpi_os_release_lock */
+
+#ifndef acpi_cpu_flags
+#define acpi_cpu_flags acpi_native_uint
+#endif
+
+/* Object returned from acpi_os_create_cache */
+
+#ifndef acpi_cache_t
+#define acpi_cache_t struct acpi_memory_list
+#endif
+
+/* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */
+
+#ifndef acpi_uintptr_t
+#define acpi_uintptr_t void *
+#endif
+
+/*
+ * ACPI_PRINTF_LIKE is used to tag functions as "printf-like" because
+ * some compilers can catch printf format string problems
+ */
+#ifndef ACPI_PRINTF_LIKE
+#define ACPI_PRINTF_LIKE(c)
+#endif
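+
+/*
+ * Illustrative note (not part of the original header): a GCC-based host
+ * header would typically define this roughly as
+ *
+ *   #define ACPI_PRINTF_LIKE(c) __attribute__ ((__format__ (__printf__, (c), (c)+1)))
+ *
+ * so that the compiler can check format strings against their arguments.
+ */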
+
+/*
+ * Some compilers complain about unused variables. Sometimes we don't want to
+ * use all the variables (for example, _acpi_module_name). This allows us
+ * to tell the compiler in a per-variable manner that a variable
+ * is unused
+ */
+#ifndef ACPI_UNUSED_VAR
+#define ACPI_UNUSED_VAR
+#endif
+
+/*
+ * All ACPICA functions that are available to the rest of the kernel are
+ * tagged with this macro which can be defined as appropriate for the host.
+ */
+#ifndef ACPI_EXPORT_SYMBOL
+#define ACPI_EXPORT_SYMBOL(symbol)
+#endif
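+
+/*
+ * Illustrative note (not part of the original header): a host that exports
+ * kernel symbols can map this macro onto its own facility; a Linux-style
+ * host, for example, defines it along the lines of
+ *
+ *   #define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol);
+ */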
+
+/*******************************************************************************
+ *
+ * Independent types
+ *
+ ******************************************************************************/
+
+/*
+ * Pointer overlays to avoid lots of typecasting for
+ * code that accepts both physical and logical pointers.
+ */
+union acpi_pointers {
+ acpi_physical_address physical;
+ void *logical;
+ acpi_table_ptr value;
+};
+
+struct acpi_pointer {
+ u32 pointer_type;
+ union acpi_pointers pointer;
+};
+
+/* pointer_types for above */
+
+#define ACPI_PHYSICAL_POINTER 0x01
+#define ACPI_LOGICAL_POINTER 0x02
+
+/* Processor mode */
+
+#define ACPI_PHYSICAL_ADDRESSING 0x04
+#define ACPI_LOGICAL_ADDRESSING 0x08
+#define ACPI_MEMORY_MODE 0x0C
+
+#define ACPI_PHYSMODE_PHYSPTR ACPI_PHYSICAL_ADDRESSING | ACPI_PHYSICAL_POINTER
+#define ACPI_LOGMODE_PHYSPTR ACPI_LOGICAL_ADDRESSING | ACPI_PHYSICAL_POINTER
+#define ACPI_LOGMODE_LOGPTR ACPI_LOGICAL_ADDRESSING | ACPI_LOGICAL_POINTER
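+
+/*
+ * Usage sketch (illustrative, not part of the original header): a caller
+ * passing a physical table address would typically fill in the structure
+ * as
+ *
+ *   struct acpi_pointer address;
+ *
+ *   address.pointer_type = ACPI_PHYSMODE_PHYSPTR;
+ *   address.pointer.physical = rsdp_physical_address;
+ *
+ * where rsdp_physical_address is a hypothetical acpi_physical_address value.
+ */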
+
+/* Logical defines and NULL */
+
+#ifdef FALSE
+#undef FALSE
+#endif
+#define FALSE (1 == 0)
+
+#ifdef TRUE
+#undef TRUE
+#endif
+#define TRUE (1 == 1)
+
+#ifndef NULL
+#define NULL (void *) 0
+#endif
+
+/*
+ * Miscellaneous types
+ */
+typedef u32 acpi_status; /* All ACPI Exceptions */
+typedef u32 acpi_name; /* 4-byte ACPI name */
+typedef char *acpi_string; /* Null terminated ASCII string */
+typedef void *acpi_handle; /* Actually a ptr to a NS Node */
+
+struct uint64_struct {
+ u32 lo;
+ u32 hi;
+};
+
+union uint64_overlay {
+ u64 full;
+ struct uint64_struct part;
+};
+
+struct uint32_struct {
+ u32 lo;
+ u32 hi;
+};
+
+/* Synchronization objects */
+
+#define acpi_mutex void *
+#define acpi_semaphore void *
+
+/*
+ * Acpi integer width. In ACPI version 1, integers are
+ * 32 bits. In ACPI version 2, integers are 64 bits.
+ * Note that this pertains to the ACPI integer type only, not
+ * other integers used in the implementation of the ACPI CA
+ * subsystem.
+ */
+#ifdef ACPI_NO_INTEGER64_SUPPORT
+
+/* 32-bit integers only, no 64-bit support */
+
+typedef u32 acpi_integer;
+#define ACPI_INTEGER_MAX ACPI_UINT32_MAX
+#define ACPI_INTEGER_BIT_SIZE 32
+#define ACPI_MAX_DECIMAL_DIGITS 10 /* 2^32 = 4,294,967,296 */
+
+#define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 32-bit divide */
+
+#else
+
+/* 64-bit integers */
+
+typedef u64 acpi_integer;
+#define ACPI_INTEGER_MAX ACPI_UINT64_MAX
+#define ACPI_INTEGER_BIT_SIZE 64
+#define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */
+
+#if ACPI_MACHINE_WIDTH == 64
+#define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 64-bit divide */
+#endif
+#endif
+
+#define ACPI_MAX64_DECIMAL_DIGITS 20
+#define ACPI_MAX32_DECIMAL_DIGITS 10
+#define ACPI_MAX16_DECIMAL_DIGITS 5
+#define ACPI_MAX8_DECIMAL_DIGITS 3
+
+/*
+ * Constants with special meanings
+ */
+#define ACPI_ROOT_OBJECT ACPI_ADD_PTR (acpi_handle, NULL, ACPI_MAX_PTR)
+
+/*
+ * Initialization sequence
+ */
+#define ACPI_FULL_INITIALIZATION 0x00
+#define ACPI_NO_ADDRESS_SPACE_INIT 0x01
+#define ACPI_NO_HARDWARE_INIT 0x02
+#define ACPI_NO_EVENT_INIT 0x04
+#define ACPI_NO_HANDLER_INIT 0x08
+#define ACPI_NO_ACPI_ENABLE 0x10
+#define ACPI_NO_DEVICE_INIT 0x20
+#define ACPI_NO_OBJECT_INIT 0x40
+
+/*
+ * Initialization state
+ */
+#define ACPI_INITIALIZED_OK 0x01
+
+/*
+ * Power state values
+ */
+#define ACPI_STATE_UNKNOWN (u8) 0xFF
+
+#define ACPI_STATE_S0 (u8) 0
+#define ACPI_STATE_S1 (u8) 1
+#define ACPI_STATE_S2 (u8) 2
+#define ACPI_STATE_S3 (u8) 3
+#define ACPI_STATE_S4 (u8) 4
+#define ACPI_STATE_S5 (u8) 5
+#define ACPI_S_STATES_MAX ACPI_STATE_S5
+#define ACPI_S_STATE_COUNT 6
+
+#define ACPI_STATE_D0 (u8) 0
+#define ACPI_STATE_D1 (u8) 1
+#define ACPI_STATE_D2 (u8) 2
+#define ACPI_STATE_D3 (u8) 3
+#define ACPI_D_STATES_MAX ACPI_STATE_D3
+#define ACPI_D_STATE_COUNT 4
+
+#define ACPI_STATE_C0 (u8) 0
+#define ACPI_STATE_C1 (u8) 1
+#define ACPI_STATE_C2 (u8) 2
+#define ACPI_STATE_C3 (u8) 3
+#define ACPI_C_STATES_MAX ACPI_STATE_C3
+#define ACPI_C_STATE_COUNT 4
+
+/*
+ * Sleep type invalid value
+ */
+#define ACPI_SLEEP_TYPE_MAX 0x7
+#define ACPI_SLEEP_TYPE_INVALID 0xFF
+
+/*
+ * Standard notify values
+ */
+#define ACPI_NOTIFY_BUS_CHECK (u8) 0
+#define ACPI_NOTIFY_DEVICE_CHECK (u8) 1
+#define ACPI_NOTIFY_DEVICE_WAKE (u8) 2
+#define ACPI_NOTIFY_EJECT_REQUEST (u8) 3
+#define ACPI_NOTIFY_DEVICE_CHECK_LIGHT (u8) 4
+#define ACPI_NOTIFY_FREQUENCY_MISMATCH (u8) 5
+#define ACPI_NOTIFY_BUS_MODE_MISMATCH (u8) 6
+#define ACPI_NOTIFY_POWER_FAULT (u8) 7
+
+/*
+ * Table types. These values are passed to the table related APIs
+ */
+typedef u32 acpi_table_type;
+
+#define ACPI_TABLE_ID_RSDP (acpi_table_type) 0
+#define ACPI_TABLE_ID_DSDT (acpi_table_type) 1
+#define ACPI_TABLE_ID_FADT (acpi_table_type) 2
+#define ACPI_TABLE_ID_FACS (acpi_table_type) 3
+#define ACPI_TABLE_ID_PSDT (acpi_table_type) 4
+#define ACPI_TABLE_ID_SSDT (acpi_table_type) 5
+#define ACPI_TABLE_ID_XSDT (acpi_table_type) 6
+#define ACPI_TABLE_ID_MAX 6
+#define ACPI_NUM_TABLE_TYPES (ACPI_TABLE_ID_MAX+1)
+
+/*
+ * Types associated with ACPI names and objects. The first group of
+ * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition
+ * of the ACPI object_type() operator (See the ACPI Spec). Therefore,
+ * only add to the first group if the spec changes.
+ *
+ * NOTE: Types must be kept in sync with the global acpi_ns_properties
+ * and acpi_ns_type_names arrays.
+ */
+typedef u32 acpi_object_type;
+
+#define ACPI_TYPE_ANY 0x00
+#define ACPI_TYPE_INTEGER 0x01 /* Byte/Word/Dword/Zero/One/Ones */
+#define ACPI_TYPE_STRING 0x02
+#define ACPI_TYPE_BUFFER 0x03
+#define ACPI_TYPE_PACKAGE 0x04 /* byte_const, multiple data_term/Constant/super_name */
+#define ACPI_TYPE_FIELD_UNIT 0x05
+#define ACPI_TYPE_DEVICE 0x06 /* Name, multiple Node */
+#define ACPI_TYPE_EVENT 0x07
+#define ACPI_TYPE_METHOD 0x08 /* Name, byte_const, multiple Code */
+#define ACPI_TYPE_MUTEX 0x09
+#define ACPI_TYPE_REGION 0x0A
+#define ACPI_TYPE_POWER 0x0B /* Name,byte_const,word_const,multi Node */
+#define ACPI_TYPE_PROCESSOR 0x0C /* Name,byte_const,Dword_const,byte_const,multi nm_o */
+#define ACPI_TYPE_THERMAL 0x0D /* Name, multiple Node */
+#define ACPI_TYPE_BUFFER_FIELD 0x0E
+#define ACPI_TYPE_DDB_HANDLE 0x0F
+#define ACPI_TYPE_DEBUG_OBJECT 0x10
+
+#define ACPI_TYPE_EXTERNAL_MAX 0x10
+
+/*
+ * These are object types that do not map directly to the ACPI
+ * object_type() operator. They are used for various internal purposes only.
+ * If new predefined ACPI_TYPEs are added (via the ACPI specification), these
+ * internal types must move upwards. (There is code that depends on these
+ * values being contiguous with the external types above.)
+ */
+#define ACPI_TYPE_LOCAL_REGION_FIELD 0x11
+#define ACPI_TYPE_LOCAL_BANK_FIELD 0x12
+#define ACPI_TYPE_LOCAL_INDEX_FIELD 0x13
+#define ACPI_TYPE_LOCAL_REFERENCE 0x14 /* Arg#, Local#, Name, Debug, ref_of, Index */
+#define ACPI_TYPE_LOCAL_ALIAS 0x15
+#define ACPI_TYPE_LOCAL_METHOD_ALIAS 0x16
+#define ACPI_TYPE_LOCAL_NOTIFY 0x17
+#define ACPI_TYPE_LOCAL_ADDRESS_HANDLER 0x18
+#define ACPI_TYPE_LOCAL_RESOURCE 0x19
+#define ACPI_TYPE_LOCAL_RESOURCE_FIELD 0x1A
+#define ACPI_TYPE_LOCAL_SCOPE 0x1B /* 1 Name, multiple object_list Nodes */
+
+#define ACPI_TYPE_NS_NODE_MAX 0x1B /* Last typecode used within a NS Node */
+
+/*
+ * These are special object types that never appear in
+ * a Namespace node, only in a union acpi_operand_object
+ */
+#define ACPI_TYPE_LOCAL_EXTRA 0x1C
+#define ACPI_TYPE_LOCAL_DATA 0x1D
+
+#define ACPI_TYPE_LOCAL_MAX 0x1D
+
+/* All types above here are invalid */
+
+#define ACPI_TYPE_INVALID 0x1E
+#define ACPI_TYPE_NOT_FOUND 0xFF
+
+/*
+ * All I/O
+ */
+#define ACPI_READ 0
+#define ACPI_WRITE 1
+#define ACPI_IO_MASK 1
+
+/*
+ * Event Types: Fixed & General Purpose
+ */
+typedef u32 acpi_event_type;
+
+/*
+ * Fixed events
+ */
+#define ACPI_EVENT_PMTIMER 0
+#define ACPI_EVENT_GLOBAL 1
+#define ACPI_EVENT_POWER_BUTTON 2
+#define ACPI_EVENT_SLEEP_BUTTON 3
+#define ACPI_EVENT_RTC 4
+#define ACPI_EVENT_MAX 4
+#define ACPI_NUM_FIXED_EVENTS ACPI_EVENT_MAX + 1
+
+/*
+ * Event Status - Per event
+ * -------------
+ * The encoding of acpi_event_status is illustrated below.
+ * Note that a set bit (1) indicates the property is TRUE
+ * (e.g. if bit 0 is set then the event is enabled).
+ * +-------------+-+-+-+
+ * | Bits 31:3 |2|1|0|
+ * +-------------+-+-+-+
+ * | | | |
+ * | | | +- Enabled?
+ * | | +--- Enabled for wake?
+ * | +----- Set?
+ * +----------- <Reserved>
+ */
+typedef u32 acpi_event_status;
+
+#define ACPI_EVENT_FLAG_DISABLED (acpi_event_status) 0x00
+#define ACPI_EVENT_FLAG_ENABLED (acpi_event_status) 0x01
+#define ACPI_EVENT_FLAG_WAKE_ENABLED (acpi_event_status) 0x02
+#define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04
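+
+/*
+ * Illustrative helper (not part of the original header): shows how the
+ * status bits above are intended to be tested. The function name is a
+ * hypothetical example, not an ACPICA interface.
+ */
+static inline u8 acpi_example_event_is_enabled(acpi_event_status event_status)
+{
+	return (u8) ((event_status & ACPI_EVENT_FLAG_ENABLED) != 0);
+}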
+
+/*
+ * General Purpose Events (GPE)
+ */
+#define ACPI_GPE_INVALID 0xFF
+#define ACPI_GPE_MAX 0xFF
+#define ACPI_NUM_GPE 256
+
+#define ACPI_GPE_ENABLE 0
+#define ACPI_GPE_DISABLE 1
+
+/*
+ * GPE info flags - Per GPE
+ * +-+-+-+---+---+-+
+ * |7|6|5|4:3|2:1|0|
+ * +-+-+-+---+---+-+
+ * | | | | | |
+ * | | | | | +--- Interrupt type: Edge or Level Triggered
+ * | | | | +--- Type: Wake-only, Runtime-only, or wake/runtime
+ * | | | +--- Type of dispatch -- to method, handler, or none
+ * | | +--- Enabled for runtime?
+ * | +--- Enabled for wake?
+ * +--- System state when GPE occurred (running/waking)
+ */
+#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x01
+#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x01
+#define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00
+
+#define ACPI_GPE_TYPE_MASK (u8) 0x06
+#define ACPI_GPE_TYPE_WAKE_RUN (u8) 0x06
+#define ACPI_GPE_TYPE_WAKE (u8) 0x02
+#define ACPI_GPE_TYPE_RUNTIME (u8) 0x04 /* Default */
+
+#define ACPI_GPE_DISPATCH_MASK (u8) 0x18
+#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x08
+#define ACPI_GPE_DISPATCH_METHOD (u8) 0x10
+#define ACPI_GPE_DISPATCH_NOT_USED (u8) 0x00 /* Default */
+
+#define ACPI_GPE_RUN_ENABLE_MASK (u8) 0x20
+#define ACPI_GPE_RUN_ENABLED (u8) 0x20
+#define ACPI_GPE_RUN_DISABLED (u8) 0x00 /* Default */
+
+#define ACPI_GPE_WAKE_ENABLE_MASK (u8) 0x40
+#define ACPI_GPE_WAKE_ENABLED (u8) 0x40
+#define ACPI_GPE_WAKE_DISABLED (u8) 0x00 /* Default */
+
+#define ACPI_GPE_ENABLE_MASK (u8) 0x60 /* Both run/wake */
+
+#define ACPI_GPE_SYSTEM_MASK (u8) 0x80
+#define ACPI_GPE_SYSTEM_RUNNING (u8) 0x80
+#define ACPI_GPE_SYSTEM_WAKING (u8) 0x00
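+
+/*
+ * Illustrative helper (not part of the original header): extracts the
+ * dispatch field from a GPE flags byte using the masks above. The function
+ * name is a hypothetical example, not an ACPICA interface.
+ */
+static inline u8 acpi_example_gpe_dispatch_type(u8 gpe_flags)
+{
+	return (u8) (gpe_flags & ACPI_GPE_DISPATCH_MASK);
+}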
+
+/*
+ * Flags for GPE and Lock interfaces
+ */
+#define ACPI_EVENT_WAKE_ENABLE 0x2 /* acpi_gpe_enable */
+#define ACPI_EVENT_WAKE_DISABLE 0x2 /* acpi_gpe_disable */
+
+#define ACPI_NOT_ISR 0x1
+#define ACPI_ISR 0x0
+
+/* Notify types */
+
+#define ACPI_SYSTEM_NOTIFY 0x1
+#define ACPI_DEVICE_NOTIFY 0x2
+#define ACPI_ALL_NOTIFY 0x3
+#define ACPI_MAX_NOTIFY_HANDLER_TYPE 0x3
+
+#define ACPI_MAX_SYS_NOTIFY 0x7f
+
+/* Address Space (Operation Region) Types */
+
+typedef u8 acpi_adr_space_type;
+
+#define ACPI_ADR_SPACE_SYSTEM_MEMORY (acpi_adr_space_type) 0
+#define ACPI_ADR_SPACE_SYSTEM_IO (acpi_adr_space_type) 1
+#define ACPI_ADR_SPACE_PCI_CONFIG (acpi_adr_space_type) 2
+#define ACPI_ADR_SPACE_EC (acpi_adr_space_type) 3
+#define ACPI_ADR_SPACE_SMBUS (acpi_adr_space_type) 4
+#define ACPI_ADR_SPACE_CMOS (acpi_adr_space_type) 5
+#define ACPI_ADR_SPACE_PCI_BAR_TARGET (acpi_adr_space_type) 6
+#define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 7
+#define ACPI_ADR_SPACE_FIXED_HARDWARE (acpi_adr_space_type) 127
+
+/*
+ * bit_register IDs
+ * These are bitfields defined within the full ACPI registers
+ */
+#define ACPI_BITREG_TIMER_STATUS 0x00
+#define ACPI_BITREG_BUS_MASTER_STATUS 0x01
+#define ACPI_BITREG_GLOBAL_LOCK_STATUS 0x02
+#define ACPI_BITREG_POWER_BUTTON_STATUS 0x03
+#define ACPI_BITREG_SLEEP_BUTTON_STATUS 0x04
+#define ACPI_BITREG_RT_CLOCK_STATUS 0x05
+#define ACPI_BITREG_WAKE_STATUS 0x06
+#define ACPI_BITREG_PCIEXP_WAKE_STATUS 0x07
+
+#define ACPI_BITREG_TIMER_ENABLE 0x08
+#define ACPI_BITREG_GLOBAL_LOCK_ENABLE 0x09
+#define ACPI_BITREG_POWER_BUTTON_ENABLE 0x0A
+#define ACPI_BITREG_SLEEP_BUTTON_ENABLE 0x0B
+#define ACPI_BITREG_RT_CLOCK_ENABLE 0x0C
+#define ACPI_BITREG_WAKE_ENABLE 0x0D
+#define ACPI_BITREG_PCIEXP_WAKE_DISABLE 0x0E
+
+#define ACPI_BITREG_SCI_ENABLE 0x0F
+#define ACPI_BITREG_BUS_MASTER_RLD 0x10
+#define ACPI_BITREG_GLOBAL_LOCK_RELEASE 0x11
+#define ACPI_BITREG_SLEEP_TYPE_A 0x12
+#define ACPI_BITREG_SLEEP_TYPE_B 0x13
+#define ACPI_BITREG_SLEEP_ENABLE 0x14
+
+#define ACPI_BITREG_ARB_DISABLE 0x15
+
+#define ACPI_BITREG_MAX 0x15
+#define ACPI_NUM_BITREG ACPI_BITREG_MAX + 1
+
+/*
+ * External ACPI object definition
+ */
+union acpi_object {
+ acpi_object_type type; /* See definition of acpi_ns_type for values */
+ struct {
+ acpi_object_type type;
+ acpi_integer value; /* The actual number */
+ } integer;
+
+ struct {
+ acpi_object_type type;
+ u32 length; /* # of bytes in string, excluding trailing null */
+ char *pointer; /* points to the string value */
+ } string;
+
+ struct {
+ acpi_object_type type;
+ u32 length; /* # of bytes in buffer */
+ u8 *pointer; /* points to the buffer */
+ } buffer;
+
+ struct {
+ acpi_object_type type;
+ u32 fill1;
+ acpi_handle handle; /* object reference */
+ } reference;
+
+ struct {
+ acpi_object_type type;
+ u32 count; /* # of elements in package */
+ union acpi_object *elements; /* Pointer to an array of ACPI_OBJECTs */
+ } package;
+
+ struct {
+ acpi_object_type type;
+ u32 proc_id;
+ acpi_io_address pblk_address;
+ u32 pblk_length;
+ } processor;
+
+ struct {
+ acpi_object_type type;
+ u32 system_level;
+ u32 resource_order;
+ } power_resource;
+};
+
+/*
+ * List of objects, used as a parameter list for control method evaluation
+ */
+struct acpi_object_list {
+ u32 count;
+ union acpi_object *pointer;
+};
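+
+/*
+ * Usage sketch (illustrative, not part of the original header): a caller
+ * passing a single integer argument to a control method (for example via
+ * acpi_evaluate_object(), declared elsewhere) would typically build the
+ * list as
+ *
+ *   union acpi_object arg;
+ *   struct acpi_object_list args;
+ *
+ *   arg.type = ACPI_TYPE_INTEGER;
+ *   arg.integer.value = 1;
+ *   args.count = 1;
+ *   args.pointer = &arg;
+ */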
+
+/*
+ * Miscellaneous common Data Structures used by the interfaces
+ */
+#define ACPI_NO_BUFFER 0
+#define ACPI_ALLOCATE_BUFFER (acpi_size) (-1)
+#define ACPI_ALLOCATE_LOCAL_BUFFER (acpi_size) (-2)
+
+struct acpi_buffer {
+ acpi_size length; /* Length in bytes of the buffer */
+ void *pointer; /* pointer to buffer */
+};
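+
+/*
+ * Usage sketch (illustrative, not part of the original header): callers that
+ * want the subsystem to allocate the return buffer typically initialize
+ *
+ *   struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ *
+ * and release buffer.pointer themselves after a successful call.
+ */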
+
+/*
+ * name_type for acpi_get_name
+ */
+#define ACPI_FULL_PATHNAME 0
+#define ACPI_SINGLE_NAME 1
+#define ACPI_NAME_TYPE_MAX 1
+
+/*
+ * Structure and flags for acpi_get_system_info
+ */
+#define ACPI_SYS_MODE_UNKNOWN 0x0000
+#define ACPI_SYS_MODE_ACPI 0x0001
+#define ACPI_SYS_MODE_LEGACY 0x0002
+#define ACPI_SYS_MODES_MASK 0x0003
+
+/*
+ * ACPI Table Info. One per ACPI table _type_
+ */
+struct acpi_table_info {
+ u32 count;
+};
+
+/*
+ * System info returned by acpi_get_system_info()
+ */
+struct acpi_system_info {
+ u32 acpi_ca_version;
+ u32 flags;
+ u32 timer_resolution;
+ u32 reserved1;
+ u32 reserved2;
+ u32 debug_level;
+ u32 debug_layer;
+ u32 num_table_types;
+ struct acpi_table_info table_info[ACPI_TABLE_ID_MAX + 1];
+};
+
+/*
+ * Types specific to the OS service interfaces
+ */
+typedef u32(ACPI_SYSTEM_XFACE * acpi_osd_handler) (void *context);
+
+typedef void
+ (ACPI_SYSTEM_XFACE * acpi_osd_exec_callback) (void *context);
+
+/*
+ * Various handlers and callback procedures
+ */
+typedef u32(*acpi_event_handler) (void *context);
+
+typedef
+void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context);
+
+typedef
+void (*acpi_object_handler) (acpi_handle object, u32 function, void *data);
+
+typedef acpi_status(*acpi_init_handler) (acpi_handle object, u32 function);
+
+#define ACPI_INIT_DEVICE_INI 1
+
+typedef
+acpi_status(*acpi_exception_handler) (acpi_status aml_status,
+ acpi_name name,
+ u16 opcode,
+ u32 aml_offset, void *context);
+
+/* Address Spaces (For Operation Regions) */
+
+typedef
+acpi_status(*acpi_adr_space_handler) (u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ acpi_integer * value,
+ void *handler_context,
+ void *region_context);
+
+#define ACPI_DEFAULT_HANDLER NULL
+
+typedef
+acpi_status(*acpi_adr_space_setup) (acpi_handle region_handle,
+ u32 function,
+ void *handler_context,
+ void **region_context);
+
+#define ACPI_REGION_ACTIVATE 0
+#define ACPI_REGION_DEACTIVATE 1
+
+typedef
+acpi_status(*acpi_walk_callback) (acpi_handle obj_handle,
+ u32 nesting_level,
+ void *context, void **return_value);
+
+/* Interrupt handler return values */
+
+#define ACPI_INTERRUPT_NOT_HANDLED 0x00
+#define ACPI_INTERRUPT_HANDLED 0x01
+
+/* Common string version of device HIDs and UIDs */
+
+struct acpi_device_id {
+ char value[ACPI_DEVICE_ID_LENGTH];
+};
+
+/* Common string version of device CIDs */
+
+struct acpi_compatible_id {
+ char value[ACPI_MAX_CID_LENGTH];
+};
+
+struct acpi_compatible_id_list {
+ u32 count;
+ u32 size;
+ struct acpi_compatible_id id[1];
+};
+
+/* Structure and flags for acpi_get_object_info */
+
+#define ACPI_VALID_STA 0x0001
+#define ACPI_VALID_ADR 0x0002
+#define ACPI_VALID_HID 0x0004
+#define ACPI_VALID_UID 0x0008
+#define ACPI_VALID_CID 0x0010
+#define ACPI_VALID_SXDS 0x0020
+
+/* Flags for _STA method */
+
+#define ACPI_STA_DEVICE_PRESENT 0x01
+#define ACPI_STA_DEVICE_ENABLED 0x02
+#define ACPI_STA_DEVICE_UI 0x04
+#define ACPI_STA_DEVICE_FUNCTIONING 0x08
+#define ACPI_STA_DEVICE_OK 0x08 /* Synonym */
+#define ACPI_STA_BATTERY_PRESENT 0x10
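+
+/*
+ * Usage sketch (illustrative, not part of the original header): the value
+ * returned by _STA is tested bitwise; for example, a device that is both
+ * present and functioning satisfies
+ *
+ *   (sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING)) ==
+ *          (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING)
+ */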
+
+#define ACPI_COMMON_OBJ_INFO \
+ acpi_object_type type; /* ACPI object type */ \
+ acpi_name name /* ACPI object Name */
+
+struct acpi_obj_info_header {
+ ACPI_COMMON_OBJ_INFO;
+};
+
+/* Structure returned from Get Object Info */
+
+struct acpi_device_info {
+ ACPI_COMMON_OBJ_INFO;
+
+ u32 valid; /* Indicates which fields below are valid */
+ u32 current_status; /* _STA value */
+ acpi_integer address; /* _ADR value if any */
+ struct acpi_device_id hardware_id; /* _HID value if any */
+ struct acpi_device_id unique_id; /* _UID value if any */
+ u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */
+ struct acpi_compatible_id_list compatibility_id; /* List of _CIDs if any */
+};
+
+/* Context structs for address space handlers */
+
+struct acpi_pci_id {
+ u16 segment;
+ u16 bus;
+ u16 device;
+ u16 function;
+};
+
+struct acpi_mem_space_context {
+ u32 length;
+ acpi_physical_address address;
+ acpi_physical_address mapped_physical_address;
+ u8 *mapped_logical_address;
+ acpi_size mapped_length;
+};
+
+/*
+ * Definitions for Resource Attributes
+ */
+typedef u16 acpi_rs_length; /* Resource Length field is fixed at 16 bits */
+typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (Length+3) = (64K-1)+3 */
+
+/*
+ * Memory Attributes
+ */
+#define ACPI_READ_ONLY_MEMORY (u8) 0x00
+#define ACPI_READ_WRITE_MEMORY (u8) 0x01
+
+#define ACPI_NON_CACHEABLE_MEMORY (u8) 0x00
+#define ACPI_CACHABLE_MEMORY (u8) 0x01
+#define ACPI_WRITE_COMBINING_MEMORY (u8) 0x02
+#define ACPI_PREFETCHABLE_MEMORY (u8) 0x03
+
+/*
+ * IO Attributes
+ * The ISA IO ranges are: n000-n0FFh, n400-n4FFh, n800-n8FFh, nC00-nCFFh.
+ * The non-ISA IO ranges are: n100-n3FFh, n500-n7FFh, n900-nBFFh, nCD0-nFFFh.
+ */
+#define ACPI_NON_ISA_ONLY_RANGES (u8) 0x01
+#define ACPI_ISA_ONLY_RANGES (u8) 0x02
+#define ACPI_ENTIRE_RANGE (ACPI_NON_ISA_ONLY_RANGES | ACPI_ISA_ONLY_RANGES)
+
+/* Type of translation - 1=Sparse, 0=Dense */
+
+#define ACPI_SPARSE_TRANSLATION (u8) 0x01
+
+/*
+ * IO Port Descriptor Decode
+ */
+#define ACPI_DECODE_10 (u8) 0x00 /* 10-bit IO address decode */
+#define ACPI_DECODE_16 (u8) 0x01 /* 16-bit IO address decode */
+
+/*
+ * IRQ Attributes
+ */
+#define ACPI_LEVEL_SENSITIVE (u8) 0x00
+#define ACPI_EDGE_SENSITIVE (u8) 0x01
+
+#define ACPI_ACTIVE_HIGH (u8) 0x00
+#define ACPI_ACTIVE_LOW (u8) 0x01
+
+#define ACPI_EXCLUSIVE (u8) 0x00
+#define ACPI_SHARED (u8) 0x01
+
+/*
+ * DMA Attributes
+ */
+#define ACPI_COMPATIBILITY (u8) 0x00
+#define ACPI_TYPE_A (u8) 0x01
+#define ACPI_TYPE_B (u8) 0x02
+#define ACPI_TYPE_F (u8) 0x03
+
+#define ACPI_NOT_BUS_MASTER (u8) 0x00
+#define ACPI_BUS_MASTER (u8) 0x01
+
+#define ACPI_TRANSFER_8 (u8) 0x00
+#define ACPI_TRANSFER_8_16 (u8) 0x01
+#define ACPI_TRANSFER_16 (u8) 0x02
+
+/*
+ * Start Dependent Functions Priority definitions
+ */
+#define ACPI_GOOD_CONFIGURATION (u8) 0x00
+#define ACPI_ACCEPTABLE_CONFIGURATION (u8) 0x01
+#define ACPI_SUB_OPTIMAL_CONFIGURATION (u8) 0x02
+
+/*
+ * 16, 32 and 64-bit Address Descriptor resource types
+ */
+#define ACPI_MEMORY_RANGE (u8) 0x00
+#define ACPI_IO_RANGE (u8) 0x01
+#define ACPI_BUS_NUMBER_RANGE (u8) 0x02
+
+#define ACPI_ADDRESS_NOT_FIXED (u8) 0x00
+#define ACPI_ADDRESS_FIXED (u8) 0x01
+
+#define ACPI_POS_DECODE (u8) 0x00
+#define ACPI_SUB_DECODE (u8) 0x01
+
+#define ACPI_PRODUCER (u8) 0x00
+#define ACPI_CONSUMER (u8) 0x01
+
+/*
+ * If possible, pack the following structures to byte alignment
+ */
+#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
+#pragma pack(1)
+#endif
+
+/* UUID data structures for use in vendor-defined resource descriptors */
+
+struct acpi_uuid {
+ u8 data[ACPI_UUID_LENGTH];
+};
+
+struct acpi_vendor_uuid {
+ u8 subtype;
+ u8 data[ACPI_UUID_LENGTH];
+};
+
+/*
+ * Structures used to describe device resources
+ */
+struct acpi_resource_irq {
+ u8 triggering;
+ u8 polarity;
+ u8 sharable;
+ u8 interrupt_count;
+ u8 interrupts[1];
+};
+
+struct acpi_resource_dma {
+ u8 type;
+ u8 bus_master;
+ u8 transfer;
+ u8 channel_count;
+ u8 channels[1];
+};
+
+struct acpi_resource_start_dependent {
+ u8 compatibility_priority;
+ u8 performance_robustness;
+};
+
+/*
+ * END_DEPENDENT_FUNCTIONS_RESOURCE struct is not
+ * needed because it has no fields
+ */
+
+struct acpi_resource_io {
+ u8 io_decode;
+ u8 alignment;
+ u8 address_length;
+ u16 minimum;
+ u16 maximum;
+};
+
+struct acpi_resource_fixed_io {
+ u16 address;
+ u8 address_length;
+};
+
+struct acpi_resource_vendor {
+ u16 byte_length;
+ u8 byte_data[1];
+};
+
+/* Vendor resource with UUID info (introduced in ACPI 3.0) */
+
+struct acpi_resource_vendor_typed {
+ u16 byte_length;
+ u8 uuid_subtype;
+ u8 uuid[ACPI_UUID_LENGTH];
+ u8 byte_data[1];
+};
+
+struct acpi_resource_end_tag {
+ u8 checksum;
+};
+
+struct acpi_resource_memory24 {
+ u8 write_protect;
+ u16 minimum;
+ u16 maximum;
+ u16 alignment;
+ u16 address_length;
+};
+
+struct acpi_resource_memory32 {
+ u8 write_protect;
+ u32 minimum;
+ u32 maximum;
+ u32 alignment;
+ u32 address_length;
+};
+
+struct acpi_resource_fixed_memory32 {
+ u8 write_protect;
+ u32 address;
+ u32 address_length;
+};
+
+struct acpi_memory_attribute {
+ u8 write_protect;
+ u8 caching;
+ u8 range_type;
+ u8 translation;
+};
+
+struct acpi_io_attribute {
+ u8 range_type;
+ u8 translation;
+ u8 translation_type;
+ u8 reserved1;
+};
+
+union acpi_resource_attribute {
+ struct acpi_memory_attribute mem;
+ struct acpi_io_attribute io;
+
+ /* Used for the *word_space macros */
+
+ u8 type_specific;
+};
+
+struct acpi_resource_source {
+ u8 index;
+ u16 string_length;
+ char *string_ptr;
+};
+
+/* Fields common to all address descriptors, 16/32/64 bit */
+
+#define ACPI_RESOURCE_ADDRESS_COMMON \
+ u8 resource_type; \
+ u8 producer_consumer; \
+ u8 decode; \
+ u8 min_address_fixed; \
+ u8 max_address_fixed; \
+ union acpi_resource_attribute info;
+
+struct acpi_resource_address {
+ACPI_RESOURCE_ADDRESS_COMMON};
+
+struct acpi_resource_address16 {
+ ACPI_RESOURCE_ADDRESS_COMMON u16 granularity;
+ u16 minimum;
+ u16 maximum;
+ u16 translation_offset;
+ u16 address_length;
+ struct acpi_resource_source resource_source;
+};
+
+struct acpi_resource_address32 {
+ ACPI_RESOURCE_ADDRESS_COMMON u32 granularity;
+ u32 minimum;
+ u32 maximum;
+ u32 translation_offset;
+ u32 address_length;
+ struct acpi_resource_source resource_source;
+};
+
+struct acpi_resource_address64 {
+ ACPI_RESOURCE_ADDRESS_COMMON u64 granularity;
+ u64 minimum;
+ u64 maximum;
+ u64 translation_offset;
+ u64 address_length;
+ struct acpi_resource_source resource_source;
+};
+
+struct acpi_resource_extended_address64 {
+ ACPI_RESOURCE_ADDRESS_COMMON u8 revision_iD;
+ u64 granularity;
+ u64 minimum;
+ u64 maximum;
+ u64 translation_offset;
+ u64 address_length;
+ u64 type_specific;
+};
+
+struct acpi_resource_extended_irq {
+ u8 producer_consumer;
+ u8 triggering;
+ u8 polarity;
+ u8 sharable;
+ u8 interrupt_count;
+ struct acpi_resource_source resource_source;
+ u32 interrupts[1];
+};
+
+struct acpi_resource_generic_register {
+ u8 space_id;
+ u8 bit_width;
+ u8 bit_offset;
+ u8 access_size;
+ u64 address;
+};
+
+/* ACPI_RESOURCE_TYPEs */
+
+#define ACPI_RESOURCE_TYPE_IRQ 0
+#define ACPI_RESOURCE_TYPE_DMA 1
+#define ACPI_RESOURCE_TYPE_START_DEPENDENT 2
+#define ACPI_RESOURCE_TYPE_END_DEPENDENT 3
+#define ACPI_RESOURCE_TYPE_IO 4
+#define ACPI_RESOURCE_TYPE_FIXED_IO 5
+#define ACPI_RESOURCE_TYPE_VENDOR 6
+#define ACPI_RESOURCE_TYPE_END_TAG 7
+#define ACPI_RESOURCE_TYPE_MEMORY24 8
+#define ACPI_RESOURCE_TYPE_MEMORY32 9
+#define ACPI_RESOURCE_TYPE_FIXED_MEMORY32 10
+#define ACPI_RESOURCE_TYPE_ADDRESS16 11
+#define ACPI_RESOURCE_TYPE_ADDRESS32 12
+#define ACPI_RESOURCE_TYPE_ADDRESS64 13
+#define ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 14 /* ACPI 3.0 */
+#define ACPI_RESOURCE_TYPE_EXTENDED_IRQ 15
+#define ACPI_RESOURCE_TYPE_GENERIC_REGISTER 16
+#define ACPI_RESOURCE_TYPE_MAX 16
+
+union acpi_resource_data {
+ struct acpi_resource_irq irq;
+ struct acpi_resource_dma dma;
+ struct acpi_resource_start_dependent start_dpf;
+ struct acpi_resource_io io;
+ struct acpi_resource_fixed_io fixed_io;
+ struct acpi_resource_vendor vendor;
+ struct acpi_resource_vendor_typed vendor_typed;
+ struct acpi_resource_end_tag end_tag;
+ struct acpi_resource_memory24 memory24;
+ struct acpi_resource_memory32 memory32;
+ struct acpi_resource_fixed_memory32 fixed_memory32;
+ struct acpi_resource_address16 address16;
+ struct acpi_resource_address32 address32;
+ struct acpi_resource_address64 address64;
+ struct acpi_resource_extended_address64 ext_address64;
+ struct acpi_resource_extended_irq extended_irq;
+ struct acpi_resource_generic_register generic_reg;
+
+ /* Common fields */
+
+ struct acpi_resource_address address; /* Common 16/32/64 address fields */
+};
+
+struct acpi_resource {
+ u32 type;
+ u32 length;
+ union acpi_resource_data data;
+};
+
+/* restore default alignment */
+
+#pragma pack()
+
+#define ACPI_RS_SIZE_MIN 12
+#define ACPI_RS_SIZE_NO_DATA 8 /* Id + Length fields */
+#define ACPI_RS_SIZE(type) (u32) (ACPI_RS_SIZE_NO_DATA + sizeof (type))
+
+#define ACPI_NEXT_RESOURCE(res) (struct acpi_resource *)((u8 *) res + res->length)
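+
+/*
+ * Usage sketch (illustrative, not part of the original header): a flattened
+ * resource list returned in a struct acpi_buffer is typically walked with
+ * ACPI_NEXT_RESOURCE() until the end tag is reached:
+ *
+ *   struct acpi_resource *res = buffer.pointer;
+ *
+ *   while (res->type != ACPI_RESOURCE_TYPE_END_TAG) {
+ *           (handle res->data according to res->type)
+ *           res = ACPI_NEXT_RESOURCE(res);
+ *   }
+ */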
+
+struct acpi_pci_routing_table {
+ u32 length;
+ u32 pin;
+ acpi_integer address; /* here for 64-bit alignment */
+ u32 source_index;
+ char source[4]; /* pad to 64 bits so sizeof() works in all cases */
+};
+
+#endif /* __ACTYPES_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acutils.h -- prototypes for the common (subsystem-wide) procedures
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef _ACUTILS_H
+#define _ACUTILS_H
+
+extern const u8 acpi_gbl_resource_aml_sizes[];
+
+/* Strings used by the disassembler and debugger resource dump routines */
+
+#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
+
+extern const char *acpi_gbl_bm_decode[];
+extern const char *acpi_gbl_config_decode[];
+extern const char *acpi_gbl_consume_decode[];
+extern const char *acpi_gbl_dec_decode[];
+extern const char *acpi_gbl_he_decode[];
+extern const char *acpi_gbl_io_decode[];
+extern const char *acpi_gbl_ll_decode[];
+extern const char *acpi_gbl_max_decode[];
+extern const char *acpi_gbl_mem_decode[];
+extern const char *acpi_gbl_min_decode[];
+extern const char *acpi_gbl_mtp_decode[];
+extern const char *acpi_gbl_rng_decode[];
+extern const char *acpi_gbl_rw_decode[];
+extern const char *acpi_gbl_shr_decode[];
+extern const char *acpi_gbl_siz_decode[];
+extern const char *acpi_gbl_trs_decode[];
+extern const char *acpi_gbl_ttp_decode[];
+extern const char *acpi_gbl_typ_decode[];
+#endif
+
+/* Types for Resource descriptor entries */
+
+#define ACPI_INVALID_RESOURCE 0
+#define ACPI_FIXED_LENGTH 1
+#define ACPI_VARIABLE_LENGTH 2
+#define ACPI_SMALL_VARIABLE_LENGTH 3
+
+typedef
+acpi_status(*acpi_walk_aml_callback) (u8 * aml,
+ u32 length,
+ u32 offset,
+ u8 resource_index, void **context);
+
+typedef
+acpi_status(*acpi_pkg_callback) (u8 object_type,
+ union acpi_operand_object * source_object,
+ union acpi_generic_state * state,
+ void *context);
+
+struct acpi_pkg_info {
+ u8 *free_space;
+ acpi_size length;
+ u32 object_space;
+ u32 num_packages;
+};
+
+#define REF_INCREMENT (u16) 0
+#define REF_DECREMENT (u16) 1
+#define REF_FORCE_DELETE (u16) 2
+
+/* acpi_ut_dump_buffer */
+
+#define DB_BYTE_DISPLAY 1
+#define DB_WORD_DISPLAY 2
+#define DB_DWORD_DISPLAY 4
+#define DB_QWORD_DISPLAY 8
+
+/*
+ * utglobal - Global data structures and procedures
+ */
+void acpi_ut_init_globals(void);
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+char *acpi_ut_get_mutex_name(u32 mutex_id);
+
+#endif
+
+char *acpi_ut_get_type_name(acpi_object_type type);
+
+char *acpi_ut_get_node_name(void *object);
+
+char *acpi_ut_get_descriptor_name(void *object);
+
+char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc);
+
+char *acpi_ut_get_region_name(u8 space_id);
+
+char *acpi_ut_get_event_name(u32 event_id);
+
+char acpi_ut_hex_to_ascii_char(acpi_integer integer, u32 position);
+
+u8 acpi_ut_valid_object_type(acpi_object_type type);
+
+/*
+ * utinit - miscellaneous initialization and shutdown
+ */
+acpi_status acpi_ut_hardware_initialize(void);
+
+void acpi_ut_subsystem_shutdown(void);
+
+acpi_status acpi_ut_validate_fadt(void);
+
+/*
+ * utclib - Local implementations of C library functions
+ */
+#ifndef ACPI_USE_SYSTEM_CLIBRARY
+
+acpi_size acpi_ut_strlen(const char *string);
+
+char *acpi_ut_strcpy(char *dst_string, const char *src_string);
+
+char *acpi_ut_strncpy(char *dst_string,
+ const char *src_string, acpi_size count);
+
+int acpi_ut_memcmp(const char *buffer1, const char *buffer2, acpi_size count);
+
+int acpi_ut_strncmp(const char *string1, const char *string2, acpi_size count);
+
+int acpi_ut_strcmp(const char *string1, const char *string2);
+
+char *acpi_ut_strcat(char *dst_string, const char *src_string);
+
+char *acpi_ut_strncat(char *dst_string,
+ const char *src_string, acpi_size count);
+
+u32 acpi_ut_strtoul(const char *string, char **terminator, u32 base);
+
+char *acpi_ut_strstr(char *string1, char *string2);
+
+void *acpi_ut_memcpy(void *dest, const void *src, acpi_size count);
+
+void *acpi_ut_memset(void *dest, acpi_native_uint value, acpi_size count);
+
+int acpi_ut_to_upper(int c);
+
+int acpi_ut_to_lower(int c);
+
+extern const u8 _acpi_ctype[];
+
+#define _ACPI_XA 0x00 /* extra alphabetic - not supported */
+#define _ACPI_XS 0x40 /* extra space */
+#define _ACPI_BB 0x00 /* BEL, BS, etc. - not supported */
+#define _ACPI_CN 0x20 /* CR, FF, HT, NL, VT */
+#define _ACPI_DI 0x04 /* '0'-'9' */
+#define _ACPI_LO 0x02 /* 'a'-'z' */
+#define _ACPI_PU 0x10 /* punctuation */
+#define _ACPI_SP 0x08 /* space */
+#define _ACPI_UP 0x01 /* 'A'-'Z' */
+#define _ACPI_XD 0x80 /* '0'-'9', 'A'-'F', 'a'-'f' */
+
+#define ACPI_IS_DIGIT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_DI))
+#define ACPI_IS_SPACE(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_SP))
+#define ACPI_IS_XDIGIT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_XD))
+#define ACPI_IS_UPPER(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_UP))
+#define ACPI_IS_LOWER(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO))
+#define ACPI_IS_PRINT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_SP | _ACPI_PU))
+#define ACPI_IS_ALPHA(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP))
+
+#endif /* ACPI_USE_SYSTEM_CLIBRARY */
+
+/*
+ * utcopy - Object construction and conversion interfaces
+ */
+acpi_status
+acpi_ut_build_simple_object(union acpi_operand_object *obj,
+ union acpi_object *user_obj,
+ u8 * data_space, u32 * buffer_space_used);
+
+acpi_status
+acpi_ut_build_package_object(union acpi_operand_object *obj,
+ u8 * buffer, u32 * space_used);
+
+acpi_status
+acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *obj,
+ struct acpi_buffer *ret_buffer);
+
+acpi_status
+acpi_ut_copy_eobject_to_iobject(union acpi_object *obj,
+ union acpi_operand_object **internal_obj);
+
+acpi_status
+acpi_ut_copy_isimple_to_isimple(union acpi_operand_object *source_obj,
+ union acpi_operand_object *dest_obj);
+
+acpi_status
+acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
+ union acpi_operand_object **dest_desc,
+ struct acpi_walk_state *walk_state);
+
+/*
+ * utcreate - Object creation
+ */
+acpi_status
+acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action);
+
+/*
+ * utdebug - Debug interfaces
+ */
+void acpi_ut_init_stack_ptr_trace(void);
+
+void acpi_ut_track_stack_ptr(void);
+
+void
+acpi_ut_trace(u32 line_number,
+ const char *function_name, char *module_name, u32 component_id);
+
+void
+acpi_ut_trace_ptr(u32 line_number,
+ const char *function_name,
+ char *module_name, u32 component_id, void *pointer);
+
+void
+acpi_ut_trace_u32(u32 line_number,
+ const char *function_name,
+ char *module_name, u32 component_id, u32 integer);
+
+void
+acpi_ut_trace_str(u32 line_number,
+ const char *function_name,
+ char *module_name, u32 component_id, char *string);
+
+void
+acpi_ut_exit(u32 line_number,
+ const char *function_name, char *module_name, u32 component_id);
+
+void
+acpi_ut_status_exit(u32 line_number,
+ const char *function_name,
+ char *module_name, u32 component_id, acpi_status status);
+
+void
+acpi_ut_value_exit(u32 line_number,
+ const char *function_name,
+ char *module_name, u32 component_id, acpi_integer value);
+
+void
+acpi_ut_ptr_exit(u32 line_number,
+ const char *function_name,
+ char *module_name, u32 component_id, u8 * ptr);
+
+void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id);
+
+void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display);
+
+void acpi_ut_report_error(char *module_name, u32 line_number);
+
+void acpi_ut_report_info(char *module_name, u32 line_number);
+
+void acpi_ut_report_warning(char *module_name, u32 line_number);
+
+/* Error and message reporting interfaces */
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_debug_print(u32 requested_debug_level,
+ u32 line_number,
+ const char *function_name,
+ char *module_name,
+ u32 component_id, char *format, ...) ACPI_PRINTF_LIKE(6);
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_debug_print_raw(u32 requested_debug_level,
+ u32 line_number,
+ const char *function_name,
+ char *module_name,
+ u32 component_id,
+ char *format, ...) ACPI_PRINTF_LIKE(6);
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_error(char *module_name,
+ u32 line_number, char *format, ...) ACPI_PRINTF_LIKE(3);
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_exception(char *module_name,
+ u32 line_number,
+ acpi_status status, char *format, ...) ACPI_PRINTF_LIKE(4);
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_warning(char *module_name,
+ u32 line_number, char *format, ...) ACPI_PRINTF_LIKE(3);
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_info(char *module_name,
+ u32 line_number, char *format, ...) ACPI_PRINTF_LIKE(3);
+
+/*
+ * utdelete - Object deletion and reference counts
+ */
+void acpi_ut_add_reference(union acpi_operand_object *object);
+
+void acpi_ut_remove_reference(union acpi_operand_object *object);
+
+void acpi_ut_delete_internal_package_object(union acpi_operand_object *object);
+
+void acpi_ut_delete_internal_simple_object(union acpi_operand_object *object);
+
+void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list);
+
+/*
+ * uteval - object evaluation
+ */
+acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
+ char *path,
+ u32 expected_return_btypes,
+ union acpi_operand_object **return_desc);
+
+acpi_status
+acpi_ut_evaluate_numeric_object(char *object_name,
+ struct acpi_namespace_node *device_node,
+ acpi_integer * address);
+
+acpi_status
+acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
+ struct acpi_device_id *hid);
+
+acpi_status
+acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
+ struct acpi_compatible_id_list **return_cid_list);
+
+acpi_status
+acpi_ut_execute_STA(struct acpi_namespace_node *device_node,
+ u32 * status_flags);
+
+acpi_status
+acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
+ struct acpi_device_id *uid);
+
+acpi_status
+acpi_ut_execute_sxds(struct acpi_namespace_node *device_node, u8 * highest);
+
+/*
+ * utobject - internal object create/delete/cache routines
+ */
+union acpi_operand_object *acpi_ut_create_internal_object_dbg(char *module_name,
+ u32 line_number,
+ u32 component_id,
+ acpi_object_type
+ type);
+
+void *acpi_ut_allocate_object_desc_dbg(char *module_name,
+ u32 line_number, u32 component_id);
+
+#define acpi_ut_create_internal_object(t) acpi_ut_create_internal_object_dbg (_acpi_module_name,__LINE__,_COMPONENT,t)
+#define acpi_ut_allocate_object_desc() acpi_ut_allocate_object_desc_dbg (_acpi_module_name,__LINE__,_COMPONENT)
+
+void acpi_ut_delete_object_desc(union acpi_operand_object *object);
+
+u8 acpi_ut_valid_internal_object(void *object);
+
+union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size);
+
+union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size);
+
+acpi_status
+acpi_ut_get_object_size(union acpi_operand_object *obj, acpi_size * obj_length);
+
+/*
+ * utstate - Generic state creation/cache routines
+ */
+void
+acpi_ut_push_generic_state(union acpi_generic_state **list_head,
+ union acpi_generic_state *state);
+
+union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state
+ **list_head);
+
+union acpi_generic_state *acpi_ut_create_generic_state(void);
+
+struct acpi_thread_state *acpi_ut_create_thread_state(void);
+
+union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
+ *object, u16 action);
+
+union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
+ void *external_object,
+ u16 index);
+
+acpi_status
+acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
+ u16 action,
+ union acpi_generic_state **state_list);
+
+#ifdef ACPI_FUTURE_USAGE
+acpi_status
+acpi_ut_create_pkg_state_and_push(void *internal_object,
+ void *external_object,
+ u16 index,
+ union acpi_generic_state **state_list);
+#endif /* ACPI_FUTURE_USAGE */
+
+union acpi_generic_state *acpi_ut_create_control_state(void);
+
+void acpi_ut_delete_generic_state(union acpi_generic_state *state);
+
+/*
+ * utmath
+ */
+acpi_status
+acpi_ut_divide(acpi_integer in_dividend,
+ acpi_integer in_divisor,
+ acpi_integer * out_quotient, acpi_integer * out_remainder);
+
+acpi_status
+acpi_ut_short_divide(acpi_integer in_dividend,
+ u32 divisor,
+ acpi_integer * out_quotient, u32 * out_remainder);
+
+/*
+ * utmisc
+ */
+u8 acpi_ut_is_aml_table(struct acpi_table_header *table);
+
+acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id);
+
+void acpi_ut_release_owner_id(acpi_owner_id * owner_id);
+
+acpi_status
+acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
+ void *target_object,
+ acpi_pkg_callback walk_callback, void *context);
+
+void acpi_ut_strupr(char *src_string);
+
+void acpi_ut_print_string(char *string, u8 max_length);
+
+u8 acpi_ut_valid_acpi_name(u32 name);
+
+acpi_name acpi_ut_repair_name(acpi_name name);
+
+u8 acpi_ut_valid_acpi_char(char character, acpi_native_uint position);
+
+acpi_status
+acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer);
+
+/* Values for Base above (16=Hex, 10=Decimal) */
+
+#define ACPI_ANY_BASE 0
+
+u32 acpi_ut_dword_byte_swap(u32 value);
+
+void acpi_ut_set_integer_width(u8 revision);
+
+#ifdef ACPI_DEBUG_OUTPUT
+void
+acpi_ut_display_init_pathname(u8 type,
+ struct acpi_namespace_node *obj_handle,
+ char *path);
+#endif
+
+/*
+ * utresrc
+ */
+acpi_status
+acpi_ut_walk_aml_resources(u8 * aml,
+ acpi_size aml_length,
+ acpi_walk_aml_callback user_function, void **context);
+
+acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index);
+
+u32 acpi_ut_get_descriptor_length(void *aml);
+
+u16 acpi_ut_get_resource_length(void *aml);
+
+u8 acpi_ut_get_resource_header_length(void *aml);
+
+u8 acpi_ut_get_resource_type(void *aml);
+
+acpi_status
+acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc,
+ u8 ** end_tag);
+
+/*
+ * utmutex - mutex support
+ */
+acpi_status acpi_ut_mutex_initialize(void);
+
+void acpi_ut_mutex_terminate(void);
+
+acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id);
+
+acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id);
+
+/*
+ * utalloc - memory allocation and object caching
+ */
+acpi_status acpi_ut_create_caches(void);
+
+acpi_status acpi_ut_delete_caches(void);
+
+acpi_status acpi_ut_validate_buffer(struct acpi_buffer *buffer);
+
+acpi_status
+acpi_ut_initialize_buffer(struct acpi_buffer *buffer,
+ acpi_size required_length);
+
+void *acpi_ut_allocate(acpi_size size, u32 component, char *module, u32 line);
+
+void *acpi_ut_allocate_zeroed(acpi_size size,
+ u32 component, char *module, u32 line);
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+void *acpi_ut_allocate_and_track(acpi_size size,
+ u32 component, char *module, u32 line);
+
+void *acpi_ut_allocate_zeroed_and_track(acpi_size size,
+ u32 component, char *module, u32 line);
+
+void
+acpi_ut_free_and_track(void *address, u32 component, char *module, u32 line);
+
+#ifdef ACPI_FUTURE_USAGE
+void acpi_ut_dump_allocation_info(void);
+#endif /* ACPI_FUTURE_USAGE */
+
+void acpi_ut_dump_allocations(u32 component, char *module);
+
+acpi_status
+acpi_ut_create_list(char *list_name,
+ u16 object_size, struct acpi_memory_list **return_cache);
+
+#endif
+
+#endif /* _ACUTILS_H */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: amlcode.h - Definitions for AML, as included in "definition blocks"
+ * Declarations and definitions contained herein are derived
+ * directly from the ACPI specification.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __AMLCODE_H__
+#define __AMLCODE_H__
+
+/* primary opcodes */
+
+#define AML_NULL_CHAR (u16) 0x00
+
+#define AML_ZERO_OP (u16) 0x00
+#define AML_ONE_OP (u16) 0x01
+#define AML_UNASSIGNED (u16) 0x02
+#define AML_ALIAS_OP (u16) 0x06
+#define AML_NAME_OP (u16) 0x08
+#define AML_BYTE_OP (u16) 0x0a
+#define AML_WORD_OP (u16) 0x0b
+#define AML_DWORD_OP (u16) 0x0c
+#define AML_STRING_OP (u16) 0x0d
+#define AML_QWORD_OP (u16) 0x0e /* ACPI 2.0 */
+#define AML_SCOPE_OP (u16) 0x10
+#define AML_BUFFER_OP (u16) 0x11
+#define AML_PACKAGE_OP (u16) 0x12
+#define AML_VAR_PACKAGE_OP (u16) 0x13 /* ACPI 2.0 */
+#define AML_METHOD_OP (u16) 0x14
+#define AML_DUAL_NAME_PREFIX (u16) 0x2e
+#define AML_MULTI_NAME_PREFIX_OP (u16) 0x2f
+#define AML_NAME_CHAR_SUBSEQ (u16) 0x30
+#define AML_NAME_CHAR_FIRST (u16) 0x41
+#define AML_EXTENDED_OP_PREFIX (u16) 0x5b
+#define AML_ROOT_PREFIX (u16) 0x5c
+#define AML_PARENT_PREFIX (u16) 0x5e
+#define AML_LOCAL_OP (u16) 0x60
+#define AML_LOCAL0 (u16) 0x60
+#define AML_LOCAL1 (u16) 0x61
+#define AML_LOCAL2 (u16) 0x62
+#define AML_LOCAL3 (u16) 0x63
+#define AML_LOCAL4 (u16) 0x64
+#define AML_LOCAL5 (u16) 0x65
+#define AML_LOCAL6 (u16) 0x66
+#define AML_LOCAL7 (u16) 0x67
+#define AML_ARG_OP (u16) 0x68
+#define AML_ARG0 (u16) 0x68
+#define AML_ARG1 (u16) 0x69
+#define AML_ARG2 (u16) 0x6a
+#define AML_ARG3 (u16) 0x6b
+#define AML_ARG4 (u16) 0x6c
+#define AML_ARG5 (u16) 0x6d
+#define AML_ARG6 (u16) 0x6e
+#define AML_STORE_OP (u16) 0x70
+#define AML_REF_OF_OP (u16) 0x71
+#define AML_ADD_OP (u16) 0x72
+#define AML_CONCAT_OP (u16) 0x73
+#define AML_SUBTRACT_OP (u16) 0x74
+#define AML_INCREMENT_OP (u16) 0x75
+#define AML_DECREMENT_OP (u16) 0x76
+#define AML_MULTIPLY_OP (u16) 0x77
+#define AML_DIVIDE_OP (u16) 0x78
+#define AML_SHIFT_LEFT_OP (u16) 0x79
+#define AML_SHIFT_RIGHT_OP (u16) 0x7a
+#define AML_BIT_AND_OP (u16) 0x7b
+#define AML_BIT_NAND_OP (u16) 0x7c
+#define AML_BIT_OR_OP (u16) 0x7d
+#define AML_BIT_NOR_OP (u16) 0x7e
+#define AML_BIT_XOR_OP (u16) 0x7f
+#define AML_BIT_NOT_OP (u16) 0x80
+#define AML_FIND_SET_LEFT_BIT_OP (u16) 0x81
+#define AML_FIND_SET_RIGHT_BIT_OP (u16) 0x82
+#define AML_DEREF_OF_OP (u16) 0x83
+#define AML_CONCAT_RES_OP (u16) 0x84 /* ACPI 2.0 */
+#define AML_MOD_OP (u16) 0x85 /* ACPI 2.0 */
+#define AML_NOTIFY_OP (u16) 0x86
+#define AML_SIZE_OF_OP (u16) 0x87
+#define AML_INDEX_OP (u16) 0x88
+#define AML_MATCH_OP (u16) 0x89
+#define AML_CREATE_DWORD_FIELD_OP (u16) 0x8a
+#define AML_CREATE_WORD_FIELD_OP (u16) 0x8b
+#define AML_CREATE_BYTE_FIELD_OP (u16) 0x8c
+#define AML_CREATE_BIT_FIELD_OP (u16) 0x8d
+#define AML_TYPE_OP (u16) 0x8e
+#define AML_CREATE_QWORD_FIELD_OP (u16) 0x8f /* ACPI 2.0 */
+#define AML_LAND_OP (u16) 0x90
+#define AML_LOR_OP (u16) 0x91
+#define AML_LNOT_OP (u16) 0x92
+#define AML_LEQUAL_OP (u16) 0x93
+#define AML_LGREATER_OP (u16) 0x94
+#define AML_LLESS_OP (u16) 0x95
+#define AML_TO_BUFFER_OP (u16) 0x96 /* ACPI 2.0 */
+#define AML_TO_DECSTRING_OP (u16) 0x97 /* ACPI 2.0 */
+#define AML_TO_HEXSTRING_OP (u16) 0x98 /* ACPI 2.0 */
+#define AML_TO_INTEGER_OP (u16) 0x99 /* ACPI 2.0 */
+#define AML_TO_STRING_OP (u16) 0x9c /* ACPI 2.0 */
+#define AML_COPY_OP (u16) 0x9d /* ACPI 2.0 */
+#define AML_MID_OP (u16) 0x9e /* ACPI 2.0 */
+#define AML_CONTINUE_OP (u16) 0x9f /* ACPI 2.0 */
+#define AML_IF_OP (u16) 0xa0
+#define AML_ELSE_OP (u16) 0xa1
+#define AML_WHILE_OP (u16) 0xa2
+#define AML_NOOP_OP (u16) 0xa3
+#define AML_RETURN_OP (u16) 0xa4
+#define AML_BREAK_OP (u16) 0xa5
+#define AML_BREAK_POINT_OP (u16) 0xcc
+#define AML_ONES_OP (u16) 0xff
+
+/* prefixed opcodes */
+
+#define AML_EXTENDED_OPCODE (u16) 0x5b00 /* prefix for 2-byte opcodes */
+
+#define AML_MUTEX_OP (u16) 0x5b01
+#define AML_EVENT_OP (u16) 0x5b02
+#define AML_SHIFT_RIGHT_BIT_OP (u16) 0x5b10
+#define AML_SHIFT_LEFT_BIT_OP (u16) 0x5b11
+#define AML_COND_REF_OF_OP (u16) 0x5b12
+#define AML_CREATE_FIELD_OP (u16) 0x5b13
+#define AML_LOAD_TABLE_OP (u16) 0x5b1f /* ACPI 2.0 */
+#define AML_LOAD_OP (u16) 0x5b20
+#define AML_STALL_OP (u16) 0x5b21
+#define AML_SLEEP_OP (u16) 0x5b22
+#define AML_ACQUIRE_OP (u16) 0x5b23
+#define AML_SIGNAL_OP (u16) 0x5b24
+#define AML_WAIT_OP (u16) 0x5b25
+#define AML_RESET_OP (u16) 0x5b26
+#define AML_RELEASE_OP (u16) 0x5b27
+#define AML_FROM_BCD_OP (u16) 0x5b28
+#define AML_TO_BCD_OP (u16) 0x5b29
+#define AML_UNLOAD_OP (u16) 0x5b2a
+#define AML_REVISION_OP (u16) 0x5b30
+#define AML_DEBUG_OP (u16) 0x5b31
+#define AML_FATAL_OP (u16) 0x5b32
+#define AML_TIMER_OP (u16) 0x5b33 /* ACPI 3.0 */
+#define AML_REGION_OP (u16) 0x5b80
+#define AML_FIELD_OP (u16) 0x5b81
+#define AML_DEVICE_OP (u16) 0x5b82
+#define AML_PROCESSOR_OP (u16) 0x5b83
+#define AML_POWER_RES_OP (u16) 0x5b84
+#define AML_THERMAL_ZONE_OP (u16) 0x5b85
+#define AML_INDEX_FIELD_OP (u16) 0x5b86
+#define AML_BANK_FIELD_OP (u16) 0x5b87
+#define AML_DATA_REGION_OP (u16) 0x5b88 /* ACPI 2.0 */
+
+/*
+ * Combination opcodes (actually two one-byte opcodes)
+ * Used by the disassembler and iASL compiler
+ */
+#define AML_LGREATEREQUAL_OP (u16) 0x9295
+#define AML_LLESSEQUAL_OP (u16) 0x9294
+#define AML_LNOTEQUAL_OP (u16) 0x9293
+
+/*
+ * Internal opcodes
+ * Use only "Unknown" AML opcodes, don't attempt to use
+ * any valid ACPI ASCII values (A-Z, 0-9, '-')
+ */
+#define AML_INT_NAMEPATH_OP (u16) 0x002d
+#define AML_INT_NAMEDFIELD_OP (u16) 0x0030
+#define AML_INT_RESERVEDFIELD_OP (u16) 0x0031
+#define AML_INT_ACCESSFIELD_OP (u16) 0x0032
+#define AML_INT_BYTELIST_OP (u16) 0x0033
+#define AML_INT_STATICSTRING_OP (u16) 0x0034
+#define AML_INT_METHODCALL_OP (u16) 0x0035
+#define AML_INT_RETURN_VALUE_OP (u16) 0x0036
+#define AML_INT_EVAL_SUBTREE_OP (u16) 0x0037
+
+#define ARG_NONE 0x0
+
+/*
+ * Argument types for the AML Parser
+ * Each field in the arg_types u32 is 5 bits, allowing for a maximum of 6 arguments.
+ * There can be up to 31 unique argument types.
+ * Zero is reserved as the end-of-list indicator.
+ */
+#define ARGP_BYTEDATA 0x01
+#define ARGP_BYTELIST 0x02
+#define ARGP_CHARLIST 0x03
+#define ARGP_DATAOBJ 0x04
+#define ARGP_DATAOBJLIST 0x05
+#define ARGP_DWORDDATA 0x06
+#define ARGP_FIELDLIST 0x07
+#define ARGP_NAME 0x08
+#define ARGP_NAMESTRING 0x09
+#define ARGP_OBJLIST 0x0A
+#define ARGP_PKGLENGTH 0x0B
+#define ARGP_SUPERNAME 0x0C
+#define ARGP_TARGET 0x0D
+#define ARGP_TERMARG 0x0E
+#define ARGP_TERMLIST 0x0F
+#define ARGP_WORDDATA 0x10
+#define ARGP_QWORDDATA 0x11
+#define ARGP_SIMPLENAME 0x12
+
+/*
+ * Resolved argument types for the AML Interpreter
+ * Each field in the arg_types u32 is 5 bits, allowing for a maximum of 6 arguments.
+ * There can be up to 31 unique argument types (0 is end-of-arg-list indicator)
+ *
+ * Note1: These values are completely independent from the ACPI_TYPEs
+ * i.e., ARGI_INTEGER != ACPI_TYPE_INTEGER
+ *
+ * Note2: If and when 5 bits becomes insufficient, it would probably be best
+ * to convert to a 6-byte array of argument types, allowing 8 bits per argument.
+ */
+
+/* Single, simple types */
+
+#define ARGI_ANYTYPE 0x01 /* Don't care */
+#define ARGI_PACKAGE 0x02
+#define ARGI_EVENT 0x03
+#define ARGI_MUTEX 0x04
+#define ARGI_DDBHANDLE 0x05
+
+/* Interchangeable types (via implicit conversion) */
+
+#define ARGI_INTEGER 0x06
+#define ARGI_STRING 0x07
+#define ARGI_BUFFER 0x08
+#define ARGI_BUFFER_OR_STRING 0x09 /* Used by MID op only */
+#define ARGI_COMPUTEDATA 0x0A /* Buffer, String, or Integer */
+
+/* Reference objects */
+
+#define ARGI_INTEGER_REF 0x0B
+#define ARGI_OBJECT_REF 0x0C
+#define ARGI_DEVICE_REF 0x0D
+#define ARGI_REFERENCE 0x0E
+#define ARGI_TARGETREF 0x0F /* Target, subject to implicit conversion */
+#define ARGI_FIXED_TARGET 0x10 /* Target, no implicit conversion */
+#define ARGI_SIMPLE_TARGET 0x11 /* Name, Local, Arg -- no implicit conversion */
+
+/* Multiple/complex types */
+
+#define ARGI_DATAOBJECT 0x12 /* Buffer, String, package or reference to a Node - Used only by size_of operator */
+#define ARGI_COMPLEXOBJ 0x13 /* Buffer, String, or package (Used by INDEX op only) */
+#define ARGI_REF_OR_STRING 0x14 /* Reference or String (Used by DEREFOF op only) */
+#define ARGI_REGION_OR_FIELD 0x15 /* Used by LOAD op only */
+#define ARGI_DATAREFOBJ 0x16
+
+/* Note: types above can expand to 0x1F maximum */
+
+#define ARGI_INVALID_OPCODE 0xFFFFFFFF
+
+/*
+ * hash offsets
+ */
+#define AML_EXTOP_HASH_OFFSET 22
+#define AML_LNOT_HASH_OFFSET 19
+
+/*
+ * opcode groups and types
+ */
+#define OPGRP_NAMED 0x01
+#define OPGRP_FIELD 0x02
+#define OPGRP_BYTELIST 0x04
+
+/*
+ * Opcode information
+ */
+
+/* Opcode flags */
+
+#define AML_LOGICAL 0x0001
+#define AML_LOGICAL_NUMERIC 0x0002
+#define AML_MATH 0x0004
+#define AML_CREATE 0x0008
+#define AML_FIELD 0x0010
+#define AML_DEFER 0x0020
+#define AML_NAMED 0x0040
+#define AML_NSNODE 0x0080
+#define AML_NSOPCODE 0x0100
+#define AML_NSOBJECT 0x0200
+#define AML_HAS_RETVAL 0x0400
+#define AML_HAS_TARGET 0x0800
+#define AML_HAS_ARGS 0x1000
+#define AML_CONSTANT 0x2000
+#define AML_NO_OPERAND_RESOLVE 0x4000
+
+/* Convenient flag groupings */
+
+#define AML_FLAGS_EXEC_0A_0T_1R AML_HAS_RETVAL
+#define AML_FLAGS_EXEC_1A_0T_0R AML_HAS_ARGS /* Monadic1 */
+#define AML_FLAGS_EXEC_1A_0T_1R AML_HAS_ARGS | AML_HAS_RETVAL /* Monadic2 */
+#define AML_FLAGS_EXEC_1A_1T_0R AML_HAS_ARGS | AML_HAS_TARGET
+#define AML_FLAGS_EXEC_1A_1T_1R AML_HAS_ARGS | AML_HAS_TARGET | AML_HAS_RETVAL /* monadic2_r */
+#define AML_FLAGS_EXEC_2A_0T_0R AML_HAS_ARGS /* Dyadic1 */
+#define AML_FLAGS_EXEC_2A_0T_1R AML_HAS_ARGS | AML_HAS_RETVAL /* Dyadic2 */
+#define AML_FLAGS_EXEC_2A_1T_1R AML_HAS_ARGS | AML_HAS_TARGET | AML_HAS_RETVAL /* dyadic2_r */
+#define AML_FLAGS_EXEC_2A_2T_1R AML_HAS_ARGS | AML_HAS_TARGET | AML_HAS_RETVAL
+#define AML_FLAGS_EXEC_3A_0T_0R AML_HAS_ARGS
+#define AML_FLAGS_EXEC_3A_1T_1R AML_HAS_ARGS | AML_HAS_TARGET | AML_HAS_RETVAL
+#define AML_FLAGS_EXEC_6A_0T_1R AML_HAS_ARGS | AML_HAS_RETVAL
+
+/*
+ * The opcode Type is used in a dispatch table, do not change
+ * without updating the table.
+ */
+#define AML_TYPE_EXEC_0A_0T_1R 0x00
+#define AML_TYPE_EXEC_1A_0T_0R 0x01 /* Monadic1 */
+#define AML_TYPE_EXEC_1A_0T_1R 0x02 /* Monadic2 */
+#define AML_TYPE_EXEC_1A_1T_0R 0x03
+#define AML_TYPE_EXEC_1A_1T_1R 0x04 /* monadic2_r */
+#define AML_TYPE_EXEC_2A_0T_0R 0x05 /* Dyadic1 */
+#define AML_TYPE_EXEC_2A_0T_1R 0x06 /* Dyadic2 */
+#define AML_TYPE_EXEC_2A_1T_1R 0x07 /* dyadic2_r */
+#define AML_TYPE_EXEC_2A_2T_1R 0x08
+#define AML_TYPE_EXEC_3A_0T_0R 0x09
+#define AML_TYPE_EXEC_3A_1T_1R 0x0A
+#define AML_TYPE_EXEC_6A_0T_1R 0x0B
+/* End of types used in dispatch table */
+
+#define AML_TYPE_LITERAL 0x0B
+#define AML_TYPE_CONSTANT 0x0C
+#define AML_TYPE_METHOD_ARGUMENT 0x0D
+#define AML_TYPE_LOCAL_VARIABLE 0x0E
+#define AML_TYPE_DATA_TERM 0x0F
+
+/* Generic for an op that returns a value */
+
+#define AML_TYPE_METHOD_CALL 0x10
+
+/* Misc */
+
+#define AML_TYPE_CREATE_FIELD 0x11
+#define AML_TYPE_CREATE_OBJECT 0x12
+#define AML_TYPE_CONTROL 0x13
+#define AML_TYPE_NAMED_NO_OBJ 0x14
+#define AML_TYPE_NAMED_FIELD 0x15
+#define AML_TYPE_NAMED_SIMPLE 0x16
+#define AML_TYPE_NAMED_COMPLEX 0x17
+#define AML_TYPE_RETURN 0x18
+
+#define AML_TYPE_UNDEFINED 0x19
+#define AML_TYPE_BOGUS 0x1A
+
+/* AML Package Length encodings */
+
+#define ACPI_AML_PACKAGE_TYPE1 0x40
+#define ACPI_AML_PACKAGE_TYPE2 0x4000
+#define ACPI_AML_PACKAGE_TYPE3 0x400000
+#define ACPI_AML_PACKAGE_TYPE4 0x40000000
+
+/*
+ * Opcode classes
+ */
+#define AML_CLASS_EXECUTE 0x00
+#define AML_CLASS_CREATE 0x01
+#define AML_CLASS_ARGUMENT 0x02
+#define AML_CLASS_NAMED_OBJECT 0x03
+#define AML_CLASS_CONTROL 0x04
+#define AML_CLASS_ASCII 0x05
+#define AML_CLASS_PREFIX 0x06
+#define AML_CLASS_INTERNAL 0x07
+#define AML_CLASS_RETURN_VALUE 0x08
+#define AML_CLASS_METHOD_CALL 0x09
+#define AML_CLASS_UNKNOWN 0x0A
+
+/* Predefined Operation Region space_iDs */
+
+typedef enum {
+ REGION_MEMORY = 0,
+ REGION_IO,
+ REGION_PCI_CONFIG,
+ REGION_EC,
+ REGION_SMBUS,
+ REGION_CMOS,
+ REGION_PCI_BAR,
+ REGION_DATA_TABLE, /* Internal use only */
+ REGION_FIXED_HW = 0x7F
+} AML_REGION_TYPES;
+
+/* Comparison operation codes for match_op operator */
+
+typedef enum {
+ MATCH_MTR = 0,
+ MATCH_MEQ = 1,
+ MATCH_MLE = 2,
+ MATCH_MLT = 3,
+ MATCH_MGE = 4,
+ MATCH_MGT = 5
+} AML_MATCH_OPERATOR;
+
+#define MAX_MATCH_OPERATOR 5
+
+/*
+ * field_flags
+ *
+ * This byte is extracted from the AML and includes three separate
+ * pieces of information about the field:
+ * 1) The field access type
+ * 2) The field update rule
+ * 3) The lock rule for the field
+ *
+ * Bits 00 - 03 : access_type (any_acc, byte_acc, etc.)
+ * 04 : lock_rule (1 == Lock)
+ * 05 - 06 : update_rule
+ */
+#define AML_FIELD_ACCESS_TYPE_MASK 0x0F
+#define AML_FIELD_LOCK_RULE_MASK 0x10
+#define AML_FIELD_UPDATE_RULE_MASK 0x60
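+
+/*
+ * Illustrative decoding of a field_flags byte using the masks above.
+ * This is a hedged sketch, not part of the original header; the value
+ * 0x31 is a made-up example:
+ *
+ *	u8 flags  = 0x31;
+ *	u8 access = flags & AML_FIELD_ACCESS_TYPE_MASK;	(0x01 = AML_FIELD_ACCESS_BYTE)
+ *	u8 lock   = flags & AML_FIELD_LOCK_RULE_MASK;	(0x10 = AML_FIELD_LOCK_ALWAYS)
+ *	u8 update = flags & AML_FIELD_UPDATE_RULE_MASK;	(0x20 = AML_FIELD_UPDATE_WRITE_AS_ONES)
+ */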
+
+/* 1) Field Access Types */
+
+typedef enum {
+ AML_FIELD_ACCESS_ANY = 0x00,
+ AML_FIELD_ACCESS_BYTE = 0x01,
+ AML_FIELD_ACCESS_WORD = 0x02,
+ AML_FIELD_ACCESS_DWORD = 0x03,
+ AML_FIELD_ACCESS_QWORD = 0x04, /* ACPI 2.0 */
+ AML_FIELD_ACCESS_BUFFER = 0x05 /* ACPI 2.0 */
+} AML_ACCESS_TYPE;
+
+/* 2) Field Lock Rules */
+
+typedef enum {
+ AML_FIELD_LOCK_NEVER = 0x00,
+ AML_FIELD_LOCK_ALWAYS = 0x10
+} AML_LOCK_RULE;
+
+/* 3) Field Update Rules */
+
+typedef enum {
+ AML_FIELD_UPDATE_PRESERVE = 0x00,
+ AML_FIELD_UPDATE_WRITE_AS_ONES = 0x20,
+ AML_FIELD_UPDATE_WRITE_AS_ZEROS = 0x40
+} AML_UPDATE_RULE;
+
+/*
+ * Field Access Attributes.
+ * This byte is extracted from the AML via the
+ * access_as keyword
+ */
+typedef enum {
+ AML_FIELD_ATTRIB_SMB_QUICK = 0x02,
+ AML_FIELD_ATTRIB_SMB_SEND_RCV = 0x04,
+ AML_FIELD_ATTRIB_SMB_BYTE = 0x06,
+ AML_FIELD_ATTRIB_SMB_WORD = 0x08,
+ AML_FIELD_ATTRIB_SMB_BLOCK = 0x0A,
+ AML_FIELD_ATTRIB_SMB_WORD_CALL = 0x0C,
+ AML_FIELD_ATTRIB_SMB_BLOCK_CALL = 0x0D
+} AML_ACCESS_ATTRIBUTE;
+
+/* Bit fields in method_flags byte */
+
+#define AML_METHOD_ARG_COUNT 0x07
+#define AML_METHOD_SERIALIZED 0x08
+#define AML_METHOD_SYNCH_LEVEL 0xF0
+
+/* METHOD_FLAGS_ARG_COUNT is not used internally, define additional flags */
+
+#define AML_METHOD_INTERNAL_ONLY 0x01
+#define AML_METHOD_RESERVED1 0x02
+#define AML_METHOD_RESERVED2 0x04
+
+#endif /* __AMLCODE_H__ */
--- /dev/null
+
+/******************************************************************************
+ *
+ * Module Name: amlresrc.h - AML resource descriptors
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+/* acpisrc:struct_defs -- for acpisrc conversion */
+
+#ifndef __AMLRESRC_H
+#define __AMLRESRC_H
+
+/*
+ * Resource descriptor tags, as defined in the ACPI specification.
+ * Used to symbolically reference fields within a descriptor.
+ */
+#define ACPI_RESTAG_ADDRESS "_ADR"
+#define ACPI_RESTAG_ALIGNMENT "_ALN"
+#define ACPI_RESTAG_ADDRESSSPACE "_ASI"
+#define ACPI_RESTAG_ACCESSSIZE "_ASZ"
+#define ACPI_RESTAG_TYPESPECIFICATTRIBUTES "_ATT"
+#define ACPI_RESTAG_BASEADDRESS "_BAS"
+#define ACPI_RESTAG_BUSMASTER "_BM_" /* Master(1), Slave(0) */
+#define ACPI_RESTAG_DECODE "_DEC"
+#define ACPI_RESTAG_DMA "_DMA"
+#define ACPI_RESTAG_DMATYPE "_TYP" /* Compatible(0), A(1), B(2), F(3) */
+#define ACPI_RESTAG_GRANULARITY "_GRA"
+#define ACPI_RESTAG_INTERRUPT "_INT"
+#define ACPI_RESTAG_INTERRUPTLEVEL "_LL_" /* active_lo(1), active_hi(0) */
+#define ACPI_RESTAG_INTERRUPTSHARE "_SHR" /* Shareable(1), no_share(0) */
+#define ACPI_RESTAG_INTERRUPTTYPE "_HE_" /* Edge(1), Level(0) */
+#define ACPI_RESTAG_LENGTH "_LEN"
+#define ACPI_RESTAG_MEMATTRIBUTES "_MTP" /* Memory(0), Reserved(1), ACPI(2), NVS(3) */
+#define ACPI_RESTAG_MEMTYPE "_MEM" /* non_cache(0), Cacheable(1) Cache+combine(2), Cache+prefetch(3) */
+#define ACPI_RESTAG_MAXADDR "_MAX"
+#define ACPI_RESTAG_MINADDR "_MIN"
+#define ACPI_RESTAG_MAXTYPE "_MAF"
+#define ACPI_RESTAG_MINTYPE "_MIF"
+#define ACPI_RESTAG_REGISTERBITOFFSET "_RBO"
+#define ACPI_RESTAG_REGISTERBITWIDTH "_RBW"
+#define ACPI_RESTAG_RANGETYPE "_RNG"
+#define ACPI_RESTAG_READWRITETYPE "_RW_" /* read_only(0), Writeable (1) */
+#define ACPI_RESTAG_TRANSLATION "_TRA"
+#define ACPI_RESTAG_TRANSTYPE "_TRS" /* Sparse(1), Dense(0) */
+#define ACPI_RESTAG_TYPE "_TTP" /* Translation(1), Static (0) */
+#define ACPI_RESTAG_XFERTYPE "_SIZ" /* 8(0), 8_and16(1), 16(2) */
+
+/* Default sizes for "small" resource descriptors */
+
+#define ASL_RDESC_IRQ_SIZE 0x02
+#define ASL_RDESC_DMA_SIZE 0x02
+#define ASL_RDESC_ST_DEPEND_SIZE 0x00
+#define ASL_RDESC_END_DEPEND_SIZE 0x00
+#define ASL_RDESC_IO_SIZE 0x07
+#define ASL_RDESC_FIXED_IO_SIZE 0x03
+#define ASL_RDESC_END_TAG_SIZE 0x01
+
+struct asl_resource_node {
+ u32 buffer_length;
+ void *buffer;
+ struct asl_resource_node *next;
+};
+
+/* Macros used to generate AML resource length fields */
+
+#define ACPI_AML_SIZE_LARGE(r) (sizeof (r) - sizeof (struct aml_resource_large_header))
+#define ACPI_AML_SIZE_SMALL(r) (sizeof (r) - sizeof (struct aml_resource_small_header))
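+
+/*
+ * Illustrative use of the size macros (a sketch, not part of the
+ * original header). With byte packing in effect, the resource_length
+ * field of a large descriptor is the descriptor size minus its common
+ * 3-byte header, e.g. for the Memory32 descriptor defined below:
+ *
+ *	struct aml_resource_memory32 desc;
+ *	desc.resource_length = ACPI_AML_SIZE_LARGE(desc);	(yields 17)
+ */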
+
+/*
+ * Resource descriptors defined in the ACPI specification.
+ *
+ * Packing/alignment must be BYTE because these descriptors
+ * are used to overlay the raw AML byte stream.
+ */
+#pragma pack(1)
+
+/*
+ * SMALL descriptors
+ */
+#define AML_RESOURCE_SMALL_HEADER_COMMON \
+ u8 descriptor_type;
+
+struct aml_resource_small_header {
+AML_RESOURCE_SMALL_HEADER_COMMON};
+
+struct aml_resource_irq {
+ AML_RESOURCE_SMALL_HEADER_COMMON u16 irq_mask;
+ u8 flags;
+};
+
+struct aml_resource_irq_noflags {
+ AML_RESOURCE_SMALL_HEADER_COMMON u16 irq_mask;
+};
+
+struct aml_resource_dma {
+ AML_RESOURCE_SMALL_HEADER_COMMON u8 dma_channel_mask;
+ u8 flags;
+};
+
+struct aml_resource_start_dependent {
+ AML_RESOURCE_SMALL_HEADER_COMMON u8 flags;
+};
+
+struct aml_resource_start_dependent_noprio {
+AML_RESOURCE_SMALL_HEADER_COMMON};
+
+struct aml_resource_end_dependent {
+AML_RESOURCE_SMALL_HEADER_COMMON};
+
+struct aml_resource_io {
+ AML_RESOURCE_SMALL_HEADER_COMMON u8 flags;
+ u16 minimum;
+ u16 maximum;
+ u8 alignment;
+ u8 address_length;
+};
+
+struct aml_resource_fixed_io {
+ AML_RESOURCE_SMALL_HEADER_COMMON u16 address;
+ u8 address_length;
+};
+
+struct aml_resource_vendor_small {
+AML_RESOURCE_SMALL_HEADER_COMMON};
+
+struct aml_resource_end_tag {
+ AML_RESOURCE_SMALL_HEADER_COMMON u8 checksum;
+};
+
+/*
+ * LARGE descriptors
+ */
+#define AML_RESOURCE_LARGE_HEADER_COMMON \
+ u8 descriptor_type;\
+ u16 resource_length;
+
+struct aml_resource_large_header {
+AML_RESOURCE_LARGE_HEADER_COMMON};
+
+struct aml_resource_memory24 {
+ AML_RESOURCE_LARGE_HEADER_COMMON u8 flags;
+ u16 minimum;
+ u16 maximum;
+ u16 alignment;
+ u16 address_length;
+};
+
+struct aml_resource_vendor_large {
+AML_RESOURCE_LARGE_HEADER_COMMON};
+
+struct aml_resource_memory32 {
+ AML_RESOURCE_LARGE_HEADER_COMMON u8 flags;
+ u32 minimum;
+ u32 maximum;
+ u32 alignment;
+ u32 address_length;
+};
+
+struct aml_resource_fixed_memory32 {
+ AML_RESOURCE_LARGE_HEADER_COMMON u8 flags;
+ u32 address;
+ u32 address_length;
+};
+
+#define AML_RESOURCE_ADDRESS_COMMON \
+ u8 resource_type; \
+ u8 flags; \
+ u8 specific_flags;
+
+struct aml_resource_address {
+AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_ADDRESS_COMMON};
+
+struct aml_resource_extended_address64 {
+ AML_RESOURCE_LARGE_HEADER_COMMON
+ AML_RESOURCE_ADDRESS_COMMON u8 revision_iD;
+ u8 reserved;
+ u64 granularity;
+ u64 minimum;
+ u64 maximum;
+ u64 translation_offset;
+ u64 address_length;
+ u64 type_specific;
+};
+
+#define AML_RESOURCE_EXTENDED_ADDRESS_REVISION 1 /* ACPI 3.0 */
+
+struct aml_resource_address64 {
+ AML_RESOURCE_LARGE_HEADER_COMMON
+ AML_RESOURCE_ADDRESS_COMMON u64 granularity;
+ u64 minimum;
+ u64 maximum;
+ u64 translation_offset;
+ u64 address_length;
+};
+
+struct aml_resource_address32 {
+ AML_RESOURCE_LARGE_HEADER_COMMON
+ AML_RESOURCE_ADDRESS_COMMON u32 granularity;
+ u32 minimum;
+ u32 maximum;
+ u32 translation_offset;
+ u32 address_length;
+};
+
+struct aml_resource_address16 {
+ AML_RESOURCE_LARGE_HEADER_COMMON
+ AML_RESOURCE_ADDRESS_COMMON u16 granularity;
+ u16 minimum;
+ u16 maximum;
+ u16 translation_offset;
+ u16 address_length;
+};
+
+struct aml_resource_extended_irq {
+ AML_RESOURCE_LARGE_HEADER_COMMON u8 flags;
+ u8 interrupt_count;
+ u32 interrupts[1];
+ /* res_source_index, res_source optional fields follow */
+};
+
+struct aml_resource_generic_register {
+ AML_RESOURCE_LARGE_HEADER_COMMON u8 address_space_id;
+ u8 bit_width;
+ u8 bit_offset;
+ u8 access_size; /* ACPI 3.0, was previously Reserved */
+ u64 address;
+};
+
+/* restore default alignment */
+
+#pragma pack()
+
+/* Union of all resource descriptors, so we can allocate the worst case */
+
+union aml_resource {
+ /* Descriptor headers */
+
+ u8 descriptor_type;
+ struct aml_resource_small_header small_header;
+ struct aml_resource_large_header large_header;
+
+ /* Small resource descriptors */
+
+ struct aml_resource_irq irq;
+ struct aml_resource_dma dma;
+ struct aml_resource_start_dependent start_dpf;
+ struct aml_resource_end_dependent end_dpf;
+ struct aml_resource_io io;
+ struct aml_resource_fixed_io fixed_io;
+ struct aml_resource_vendor_small vendor_small;
+ struct aml_resource_end_tag end_tag;
+
+ /* Large resource descriptors */
+
+ struct aml_resource_memory24 memory24;
+ struct aml_resource_generic_register generic_reg;
+ struct aml_resource_vendor_large vendor_large;
+ struct aml_resource_memory32 memory32;
+ struct aml_resource_fixed_memory32 fixed_memory32;
+ struct aml_resource_address16 address16;
+ struct aml_resource_address32 address32;
+ struct aml_resource_address64 address64;
+ struct aml_resource_extended_address64 ext_address64;
+ struct aml_resource_extended_irq extended_irq;
+
+ /* Utility overlays */
+
+ struct aml_resource_address address;
+ u32 dword_item;
+ u16 word_item;
+ u8 byte_item;
+};
+
+#endif
--- /dev/null
+#ifndef __ACPI_CONTAINER_H
+#define __ACPI_CONTAINER_H
+
+#include <linux/kernel.h>
+
+struct acpi_container {
+ acpi_handle handle;
+ unsigned long sun;
+ int state;
+};
+
+#endif /* __ACPI_CONTAINER_H */
--- /dev/null
+
+/* _PDC bit definition for Intel processors */
+
+#ifndef __PDC_INTEL_H__
+#define __PDC_INTEL_H__
+
+#define ACPI_PDC_P_FFH (0x0001)
+#define ACPI_PDC_C_C1_HALT (0x0002)
+#define ACPI_PDC_T_FFH (0x0004)
+#define ACPI_PDC_SMP_C1PT (0x0008)
+#define ACPI_PDC_SMP_C2C3 (0x0010)
+#define ACPI_PDC_SMP_P_SWCOORD (0x0020)
+#define ACPI_PDC_SMP_C_SWCOORD (0x0040)
+#define ACPI_PDC_SMP_T_SWCOORD (0x0080)
+#define ACPI_PDC_C_C1_FFH (0x0100)
+
+#define ACPI_PDC_EST_CAPABILITY_SMP (ACPI_PDC_SMP_C1PT | \
+ ACPI_PDC_C_C1_HALT | \
+ ACPI_PDC_P_FFH)
+
+#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \
+ ACPI_PDC_SMP_C1PT | \
+ ACPI_PDC_C_C1_HALT)
+
+#endif /* __PDC_INTEL_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acenv.h - Generation environment specific items
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACENV_H__
+#define __ACENV_H__
+
+/*
+ * Configuration for ACPI tools and utilities
+ */
+
+#ifdef ACPI_LIBRARY
+/*
+ * Note: The non-debug version of the acpi_library does not contain any
+ * debug support, for minimal size. The debug version uses ACPI_FULL_DEBUG.
+ */
+#define ACPI_USE_LOCAL_CACHE
+#endif
+
+#ifdef ACPI_ASL_COMPILER
+#define ACPI_DEBUG_OUTPUT
+#define ACPI_APPLICATION
+#define ACPI_DISASSEMBLER
+#define ACPI_CONSTANT_EVAL_ONLY
+#define ACPI_LARGE_NAMESPACE_NODE
+#define ACPI_DATA_TABLE_DISASSEMBLY
+#endif
+
+#ifdef ACPI_EXEC_APP
+#undef DEBUGGER_THREADING
+#define DEBUGGER_THREADING DEBUGGER_SINGLE_THREADED
+#define ACPI_FULL_DEBUG
+#define ACPI_APPLICATION
+#define ACPI_DEBUGGER
+#define ACPI_MUTEX_DEBUG
+#define ACPI_DBG_TRACK_ALLOCATIONS
+#endif
+
+#ifdef ACPI_DASM_APP
+#ifndef MSDOS
+#define ACPI_DEBUG_OUTPUT
+#endif
+#define ACPI_APPLICATION
+#define ACPI_DISASSEMBLER
+#define ACPI_NO_METHOD_EXECUTION
+#define ACPI_LARGE_NAMESPACE_NODE
+#define ACPI_DATA_TABLE_DISASSEMBLY
+#endif
+
+#ifdef ACPI_APPLICATION
+#define ACPI_USE_SYSTEM_CLIBRARY
+#define ACPI_USE_LOCAL_CACHE
+#endif
+
+#ifdef ACPI_FULL_DEBUG
+#define ACPI_DEBUGGER
+#define ACPI_DEBUG_OUTPUT
+#define ACPI_DISASSEMBLER
+#endif
+
+/*
+ * Environment configuration. The purpose of this file is to interface to the
+ * local generation environment.
+ *
+ * 1) ACPI_USE_SYSTEM_CLIBRARY - Define this if linking to an actual C library.
+ * Otherwise, local versions of string/memory functions will be used.
+ * 2) ACPI_USE_STANDARD_HEADERS - Define this if linking to a C library and
+ * the standard header files may be used.
+ *
+ * The ACPI subsystem only uses low level C library functions that do not call
+ * operating system services and may therefore be inlined in the code.
+ *
+ * It may be necessary to tailor these include files to the target
+ * generation environment.
+ *
+ *
+ * Functions and constants used from each header:
+ *
+ * string.h: memcpy
+ * memset
+ * strcat
+ * strcmp
+ * strcpy
+ * strlen
+ * strncmp
+ * strncat
+ * strncpy
+ *
+ * stdlib.h: strtoul
+ *
+ * stdarg.h: va_list
+ * va_arg
+ * va_start
+ * va_end
+ *
+ */
+
+/*! [Begin] no source code translation */
+
+#if defined(__linux__)
+#include "aclinux.h"
+
+#elif defined(_AED_EFI)
+#include "acefi.h"
+
+#elif defined(WIN32)
+#include "acwin.h"
+
+#elif defined(WIN64)
+#include "acwin64.h"
+
+#elif defined(MSDOS) /* Must appear after WIN32 and WIN64 check */
+#include "acdos16.h"
+
+#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+#include "acfreebsd.h"
+
+#elif defined(__NetBSD__)
+#include "acnetbsd.h"
+
+#elif defined(MODESTO)
+#include "acmodesto.h"
+
+#elif defined(NETWARE)
+#include "acnetware.h"
+
+#elif defined(__sun)
+#include "acsolaris.h"
+
+#else
+
+/* All other environments */
+
+#define ACPI_USE_STANDARD_HEADERS
+
+#define COMPILER_DEPENDENT_INT64 long long
+#define COMPILER_DEPENDENT_UINT64 unsigned long long
+
+#endif
+
+/*! [End] no source code translation !*/
+
+/*
+ * Debugger threading model
+ * Use single-threaded if the entire subsystem is contained in an application.
+ * Use multi-threaded when the subsystem is running in the kernel.
+ *
+ * By default the model is single threaded if ACPI_APPLICATION is set,
+ * multi-threaded if ACPI_APPLICATION is not set.
+ */
+#define DEBUGGER_SINGLE_THREADED 0
+#define DEBUGGER_MULTI_THREADED 1
+
+#ifndef DEBUGGER_THREADING
+#ifdef ACPI_APPLICATION
+#define DEBUGGER_THREADING DEBUGGER_SINGLE_THREADED
+
+#else
+#define DEBUGGER_THREADING DEBUGGER_MULTI_THREADED
+#endif
+#endif /* !DEBUGGER_THREADING */
+
+/******************************************************************************
+ *
+ * C library configuration
+ *
+ *****************************************************************************/
+
+#define ACPI_IS_ASCII(c) ((c) < 0x80)
+
+#ifdef ACPI_USE_SYSTEM_CLIBRARY
+/*
+ * Use the standard C library headers.
+ * We want to keep these to a minimum.
+ */
+#ifdef ACPI_USE_STANDARD_HEADERS
+/*
+ * Use the standard headers from the standard locations
+ */
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+
+#endif /* ACPI_USE_STANDARD_HEADERS */
+
+/*
+ * We will be linking to the standard Clib functions
+ */
+#define ACPI_STRSTR(s1,s2) strstr((s1), (s2))
+#define ACPI_STRCHR(s1,c) strchr((s1), (c))
+#define ACPI_STRLEN(s) (acpi_size) strlen((s))
+#define ACPI_STRCPY(d,s) (void) strcpy((d), (s))
+#define ACPI_STRNCPY(d,s,n) (void) strncpy((d), (s), (acpi_size)(n))
+#define ACPI_STRNCMP(d,s,n) strncmp((d), (s), (acpi_size)(n))
+#define ACPI_STRCMP(d,s) strcmp((d), (s))
+#define ACPI_STRCAT(d,s) (void) strcat((d), (s))
+#define ACPI_STRNCAT(d,s,n) strncat((d), (s), (acpi_size)(n))
+#define ACPI_STRTOUL(d,s,n) strtoul((d), (s), (acpi_size)(n))
+#define ACPI_MEMCMP(s1,s2,n) memcmp((const char *)(s1), (const char *)(s2), (acpi_size)(n))
+#define ACPI_MEMCPY(d,s,n) (void) memcpy((d), (s), (acpi_size)(n))
+#define ACPI_MEMSET(d,s,n) (void) memset((d), (s), (acpi_size)(n))
+
+#define ACPI_TOUPPER(i) toupper((int) (i))
+#define ACPI_TOLOWER(i) tolower((int) (i))
+#define ACPI_IS_XDIGIT(i) isxdigit((int) (i))
+#define ACPI_IS_DIGIT(i) isdigit((int) (i))
+#define ACPI_IS_SPACE(i) isspace((int) (i))
+#define ACPI_IS_UPPER(i) isupper((int) (i))
+#define ACPI_IS_PRINT(i) isprint((int) (i))
+#define ACPI_IS_ALPHA(i) isalpha((int) (i))
+
+#else
+
+/******************************************************************************
+ *
+ * Not using native C library, use local implementations
+ *
+ *****************************************************************************/
+
+ /*
+ * Use local definitions of C library macros and functions.
+ * NOTE: The function implementations may not be as efficient
+ * as an inline or assembly code implementation provided by a
+ * native C library.
+ */
+
+#ifndef va_arg
+
+#ifndef _VALIST
+#define _VALIST
+typedef char *va_list;
+#endif /* _VALIST */
+
+/*
+ * Storage alignment properties
+ */
+#define _AUPBND (sizeof (acpi_native_int) - 1)
+#define _ADNBND (sizeof (acpi_native_int) - 1)
+
+/*
+ * Variable argument list macro definitions
+ */
+#define _bnd(X, bnd) (((sizeof (X)) + (bnd)) & (~(bnd)))
+#define va_arg(ap, T) (*(T *)(((ap) += (_bnd (T, _AUPBND))) - (_bnd (T,_ADNBND))))
+#define va_end(ap) (void) 0
+#define va_start(ap, A) (void) ((ap) = (((char *) &(A)) + (_bnd (A,_AUPBND))))
+
+#endif /* va_arg */
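+
+/*
+ * Illustrative use of the local va_list macros above (a hypothetical
+ * helper, not part of the original header). It assumes the plain
+ * stack-based argument layout that these macros are written for:
+ *
+ *	static u32 sum_u32(u32 count, ...)
+ *	{
+ *		va_list ap;
+ *		u32 i, sum = 0;
+ *
+ *		va_start(ap, count);
+ *		for (i = 0; i < count; i++)
+ *			sum += va_arg(ap, u32);
+ *		va_end(ap);
+ *		return sum;
+ *	}
+ */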
+
+#define ACPI_STRSTR(s1,s2) acpi_ut_strstr ((s1), (s2))
+#define ACPI_STRCHR(s1,c) acpi_ut_strchr ((s1), (c))
+#define ACPI_STRLEN(s) (acpi_size) acpi_ut_strlen ((s))
+#define ACPI_STRCPY(d,s) (void) acpi_ut_strcpy ((d), (s))
+#define ACPI_STRNCPY(d,s,n) (void) acpi_ut_strncpy ((d), (s), (acpi_size)(n))
+#define ACPI_STRNCMP(d,s,n) acpi_ut_strncmp ((d), (s), (acpi_size)(n))
+#define ACPI_STRCMP(d,s) acpi_ut_strcmp ((d), (s))
+#define ACPI_STRCAT(d,s) (void) acpi_ut_strcat ((d), (s))
+#define ACPI_STRNCAT(d,s,n) acpi_ut_strncat ((d), (s), (acpi_size)(n))
+#define ACPI_STRTOUL(d,s,n) acpi_ut_strtoul ((d), (s), (acpi_size)(n))
+#define ACPI_MEMCMP(s1,s2,n) acpi_ut_memcmp((const char *)(s1), (const char *)(s2), (acpi_size)(n))
+#define ACPI_MEMCPY(d,s,n) (void) acpi_ut_memcpy ((d), (s), (acpi_size)(n))
+#define ACPI_MEMSET(d,v,n) (void) acpi_ut_memset ((d), (v), (acpi_size)(n))
+#define ACPI_TOUPPER acpi_ut_to_upper
+#define ACPI_TOLOWER acpi_ut_to_lower
+
+#endif /* ACPI_USE_SYSTEM_CLIBRARY */
+
+/******************************************************************************
+ *
+ * Assembly code macros
+ *
+ *****************************************************************************/
+
+/*
+ * Handle platform- and compiler-specific assembly language differences.
+ * These should already have been defined by the platform includes above.
+ *
+ * Notes:
+ * 1) Interrupt 3 is used to break into a debugger
+ * 2) Interrupts are turned off during ACPI register setup
+ */
+
+/* Unrecognized compiler, use defaults */
+
+#ifndef ACPI_ASM_MACROS
+
+/*
+ * Calling conventions:
+ *
+ * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
+ * ACPI_EXTERNAL_XFACE - External ACPI interfaces
+ * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
+ * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
+ */
+#define ACPI_SYSTEM_XFACE
+#define ACPI_EXTERNAL_XFACE
+#define ACPI_INTERNAL_XFACE
+#define ACPI_INTERNAL_VAR_XFACE
+
+#define ACPI_ASM_MACROS
+#define BREAKPOINT3
+#define ACPI_DISABLE_IRQS()
+#define ACPI_ENABLE_IRQS()
+#define ACPI_ACQUIRE_GLOBAL_LOCK(Glptr, acq)
+#define ACPI_RELEASE_GLOBAL_LOCK(Glptr, acq)
+
+#endif /* ACPI_ASM_MACROS */
+
+#ifdef ACPI_APPLICATION
+
+/* Don't want software interrupts within a ring3 application */
+
+#undef BREAKPOINT3
+#define BREAKPOINT3
+#endif
+
+/******************************************************************************
+ *
+ * Compiler-specific information is contained in the compiler-specific
+ * headers.
+ *
+ *****************************************************************************/
+#endif /* __ACENV_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: acgcc.h - GCC specific defines, etc.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACGCC_H__
+#define __ACGCC_H__
+
+/* Function name is used for debug output. Non-ANSI, compiler-dependent */
+
+#define ACPI_GET_FUNCTION_NAME __FUNCTION__
+
+/*
+ * This macro is used to tag functions as "printf-like" because
+ * some compilers (like GCC) can catch printf format string problems.
+ */
+#define ACPI_PRINTF_LIKE(c) __attribute__ ((__format__ (__printf__, c, c+1)))
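+
+/*
+ * Illustrative usage (hypothetical prototype, not part of this header):
+ * the format string is parameter 1, so the variable arguments start at 2.
+ *
+ *	void my_debug_print(const char *format, ...) ACPI_PRINTF_LIKE(1);
+ */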
+
+/*
+ * Some compilers complain about unused variables. Sometimes we don't want to
+ * use all the variables (for example, _acpi_module_name). This allows us
+ * to tell the compiler, on a per-variable basis, that a variable is
+ * intentionally unused.
+ */
+#define ACPI_UNUSED_VAR __attribute__ ((unused))
+
+#endif /* __ACGCC_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Name: aclinux.h - OS specific defines, etc.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2006, R. Byron Moore
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACLINUX_H__
+#define __ACLINUX_H__
+
+#define ACPI_USE_SYSTEM_CLIBRARY
+#define ACPI_USE_DO_WHILE_0
+
+#ifdef __KERNEL__
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/div64.h>
+#include <asm/acpi.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+#include <asm/current.h>
+
+/* Host-dependent types and defines */
+
+#define ACPI_MACHINE_WIDTH BITS_PER_LONG
+#define acpi_cache_t kmem_cache_t
+#define acpi_spinlock spinlock_t *
+#define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol);
+#define strtoul simple_strtoul
+
+/* Full namespace pathname length limit - arbitrary */
+#define ACPI_PATHNAME_MAX 256
+
+#else /* !__KERNEL__ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <unistd.h>
+
+#if defined(__ia64__) || defined(__x86_64__)
+#define ACPI_MACHINE_WIDTH 64
+#define COMPILER_DEPENDENT_INT64 long
+#define COMPILER_DEPENDENT_UINT64 unsigned long
+#else
+#define ACPI_MACHINE_WIDTH 32
+#define COMPILER_DEPENDENT_INT64 long long
+#define COMPILER_DEPENDENT_UINT64 unsigned long long
+#define ACPI_USE_NATIVE_DIVIDE
+#endif
+
+#define __cdecl
+#define ACPI_FLUSH_CPU_CACHE()
+#endif /* __KERNEL__ */
+
+/* Linux uses GCC */
+
+#include "acgcc.h"
+
+#define acpi_cpu_flags unsigned long
+
+#define acpi_thread_id struct task_struct *
+
+static inline acpi_thread_id acpi_os_get_thread_id(void) { return current; }
+
+/*
+ * The irqs_disabled() check is for resume from RAM.
+ * Interrupts are off during resume, just like they are for boot.
+ * However, boot has (system_state != SYSTEM_RUNNING)
+ * to quiet __might_sleep() in kmalloc() and resume does not.
+ */
+#include <acpi/actypes.h>
+static inline void *acpi_os_allocate(acpi_size size) {
+ return kmalloc(size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
+}
+static inline void *acpi_os_allocate_zeroed(acpi_size size) {
+ return kzalloc(size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
+}
+
+static inline void *acpi_os_acquire_object(acpi_cache_t * cache) {
+ return kmem_cache_zalloc(cache, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
+}
+
+#define ACPI_ALLOCATE(a) acpi_os_allocate(a)
+#define ACPI_ALLOCATE_ZEROED(a) acpi_os_allocate_zeroed(a)
+#define ACPI_FREE(a) kfree(a)
+
+#endif /* __ACLINUX_H__ */
--- /dev/null
+#ifndef __ACPI_PROCESSOR_H
+#define __ACPI_PROCESSOR_H
+
+#include <linux/kernel.h>
+#include <linux/config.h>
+#include <linux/cpu.h>
+
+#include <asm/acpi.h>
+
+#define ACPI_PROCESSOR_BUSY_METRIC 10
+
+#define ACPI_PROCESSOR_MAX_POWER 8
+#define ACPI_PROCESSOR_MAX_C2_LATENCY 100
+#define ACPI_PROCESSOR_MAX_C3_LATENCY 1000
+
+#define ACPI_PROCESSOR_MAX_THROTTLING 16
+#define ACPI_PROCESSOR_MAX_THROTTLE 250 /* 25% */
+#define ACPI_PROCESSOR_MAX_DUTY_WIDTH 4
+
+#define ACPI_PDC_REVISION_ID 0x1
+
+#define ACPI_PSD_REV0_REVISION 0 /* Support for _PSD as in ACPI 3.0 */
+#define ACPI_PSD_REV0_ENTRIES 5
+
+/*
+ * Types of coordination defined in ACPI 3.0. Same macros can be used across
+ * P, C and T states
+ */
+#define DOMAIN_COORD_TYPE_SW_ALL 0xfc
+#define DOMAIN_COORD_TYPE_SW_ANY 0xfd
+#define DOMAIN_COORD_TYPE_HW_ALL 0xfe
+
+/* Power Management */
+
+struct acpi_processor_cx;
+
+struct acpi_power_register {
+ u8 descriptor;
+ u16 length;
+ u8 space_id;
+ u8 bit_width;
+ u8 bit_offset;
+ u8 reserved;
+ u64 address;
+} __attribute__ ((packed));
+
+struct acpi_processor_cx_policy {
+ u32 count;
+ struct acpi_processor_cx *state;
+ struct {
+ u32 time;
+ u32 ticks;
+ u32 count;
+ u32 bm;
+ } threshold;
+};
+
+struct acpi_processor_cx {
+ u8 valid;
+ u8 type;
+ u32 address;
+ u32 latency;
+ u32 latency_ticks;
+ u32 power;
+ u32 usage;
+ u64 time;
+ struct acpi_processor_cx_policy promotion;
+ struct acpi_processor_cx_policy demotion;
+};
+
+struct acpi_processor_power {
+ struct acpi_processor_cx *state;
+ unsigned long bm_check_timestamp;
+ u32 default_state;
+ u32 bm_activity;
+ int count;
+ struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER];
+};
+
+/* Performance Management */
+
+struct acpi_psd_package {
+ acpi_integer num_entries;
+ acpi_integer revision;
+ acpi_integer domain;
+ acpi_integer coord_type;
+ acpi_integer num_processors;
+} __attribute__ ((packed));
+
+struct acpi_pct_register {
+ u8 descriptor;
+ u16 length;
+ u8 space_id;
+ u8 bit_width;
+ u8 bit_offset;
+ u8 reserved;
+ u64 address;
+} __attribute__ ((packed));
+
+struct acpi_processor_px {
+ acpi_integer core_frequency; /* megahertz */
+ acpi_integer power; /* milliWatts */
+ acpi_integer transition_latency; /* microseconds */
+ acpi_integer bus_master_latency; /* microseconds */
+ acpi_integer control; /* control value */
+ acpi_integer status; /* success indicator */
+};
+
+struct acpi_processor_performance {
+ unsigned int state;
+ unsigned int platform_limit;
+ struct acpi_pct_register control_register;
+ struct acpi_pct_register status_register;
+ unsigned int state_count;
+ struct acpi_processor_px *states;
+ struct acpi_psd_package domain_info;
+ cpumask_t shared_cpu_map;
+ unsigned int shared_type;
+};
+
+/* Throttling Control */
+
+struct acpi_processor_tx {
+ u16 power;
+ u16 performance;
+};
+
+struct acpi_processor_throttling {
+ int state;
+ u32 address;
+ u8 duty_offset;
+ u8 duty_width;
+ int state_count;
+ struct acpi_processor_tx states[ACPI_PROCESSOR_MAX_THROTTLING];
+};
+
+/* Limit Interface */
+
+struct acpi_processor_lx {
+ int px; /* performance state */
+ int tx; /* throttle level */
+};
+
+struct acpi_processor_limit {
+ struct acpi_processor_lx state; /* current limit */
+ struct acpi_processor_lx thermal; /* thermal limit */
+ struct acpi_processor_lx user; /* user limit */
+};
+
+struct acpi_processor_flags {
+ u8 power:1;
+ u8 performance:1;
+ u8 throttling:1;
+ u8 limit:1;
+ u8 bm_control:1;
+ u8 bm_check:1;
+ u8 has_cst:1;
+ u8 power_setup_done:1;
+};
+
+struct acpi_processor {
+ acpi_handle handle;
+ u32 acpi_id;
+ u32 id;
+ u32 pblk;
+ int performance_platform_limit;
+ struct acpi_processor_flags flags;
+ struct acpi_processor_power power;
+ struct acpi_processor_performance *performance;
+ struct acpi_processor_throttling throttling;
+ struct acpi_processor_limit limit;
+
+ /* the _PDC objects for this processor, if any */
+ struct acpi_object_list *pdc;
+};
+
+struct acpi_processor_errata {
+ u8 smp;
+ struct {
+ u8 throttle:1;
+ u8 fdma:1;
+ u8 reserved:6;
+ u32 bmisx;
+ } piix4;
+};
+
+extern int acpi_processor_preregister_performance(
+ struct acpi_processor_performance **performance);
+
+extern int acpi_processor_register_performance(struct acpi_processor_performance
+ *performance, unsigned int cpu);
+extern void acpi_processor_unregister_performance(struct
+ acpi_processor_performance
+ *performance,
+ unsigned int cpu);
+
+/* Note: this locks both the calling module and the processor module;
+ if a _PPC object exists, rmmod is then disallowed. */
+int acpi_processor_notify_smm(struct module *calling_module);
+
+/* for communication between multiple parts of the processor kernel module */
+extern struct acpi_processor *processors[NR_CPUS];
+extern struct acpi_processor_errata errata;
+
+void arch_acpi_processor_init_pdc(struct acpi_processor *pr);
+
+#ifdef ARCH_HAS_POWER_INIT
+void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
+ unsigned int cpu);
+#else
+static inline void acpi_processor_power_init_bm_check(struct
+ acpi_processor_flags
+ *flags, unsigned int cpu)
+{
+ flags->bm_check = 1;
+ return;
+}
+#endif
+
+/* in processor_perflib.c */
+
+#ifdef CONFIG_CPU_FREQ
+void acpi_processor_ppc_init(void);
+void acpi_processor_ppc_exit(void);
+int acpi_processor_ppc_has_changed(struct acpi_processor *pr);
+#else
+static inline void acpi_processor_ppc_init(void)
+{
+ return;
+}
+static inline void acpi_processor_ppc_exit(void)
+{
+ return;
+}
+static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
+{
+ static unsigned int printout = 1;
+ if (printout) {
+ printk(KERN_WARNING
+ "Warning: Processor Platform Limit event detected, but not handled.\n");
+ printk(KERN_WARNING
+ "Consider compiling CPUfreq support into your kernel.\n");
+ printout = 0;
+ }
+ return 0;
+}
+#endif /* CONFIG_CPU_FREQ */
+
+/* in processor_throttling.c */
+int acpi_processor_get_throttling_info(struct acpi_processor *pr);
+int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
+extern struct file_operations acpi_processor_throttling_fops;
+
+/* in processor_idle.c */
+int acpi_processor_power_init(struct acpi_processor *pr,
+ struct acpi_device *device);
+int acpi_processor_cst_has_changed(struct acpi_processor *pr);
+int acpi_processor_power_exit(struct acpi_processor *pr,
+ struct acpi_device *device);
+
+/* in processor_thermal.c */
+int acpi_processor_get_limit_info(struct acpi_processor *pr);
+extern struct file_operations acpi_processor_limit_fops;
+
+#ifdef CONFIG_CPU_FREQ
+void acpi_thermal_cpufreq_init(void);
+void acpi_thermal_cpufreq_exit(void);
+#else
+static inline void acpi_thermal_cpufreq_init(void)
+{
+ return;
+}
+static inline void acpi_thermal_cpufreq_exit(void)
+{
+ return;
+}
+#endif
+
+#endif
--- /dev/null
+#ifndef _ASM_GENERIC_ATOMIC_H
+#define _ASM_GENERIC_ATOMIC_H
+/*
+ * Copyright (C) 2005 Silicon Graphics, Inc.
+ * Christoph Lameter <clameter@sgi.com>
+ *
+ * Allows arch-independent atomic definitions to be provided without the
+ * need to edit all arch-specific atomic.h files.
+ */
+
+#include <arch/types.h>
+
+/*
+ * Support for atomic_long_t
+ *
+ * Casts for parameters are avoided for existing atomic functions in order to
+ * avoid issues with cast-as-lval under gcc 4.x and other limitations that the
+ * macros of a platform may have.
+ */
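+
+/*
+ * Illustrative usage (a sketch, not part of the original header). The
+ * same source works whether atomic_long_t is backed by atomic64_t or
+ * atomic_t:
+ *
+ *	static atomic_long_t nr_events = ATOMIC_LONG_INIT(0);
+ *
+ *	atomic_long_inc(&nr_events);
+ *	printk("%ld events\n", atomic_long_read(&nr_events));
+ */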
+
+#if BITS_PER_LONG == 64
+
+typedef atomic64_t atomic_long_t;
+
+#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+
+static inline long atomic_long_read(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_read(v);
+}
+
+static inline void atomic_long_set(atomic_long_t *l, long i)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_set(v, i);
+}
+
+static inline void atomic_long_inc(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_inc(v);
+}
+
+static inline void atomic_long_dec(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_dec(v);
+}
+
+static inline void atomic_long_add(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_add(i, v);
+}
+
+static inline void atomic_long_sub(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_sub(i, v);
+}
+
+#else
+
+typedef atomic_t atomic_long_t;
+
+#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
+static inline long atomic_long_read(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_read(v);
+}
+
+static inline void atomic_long_set(atomic_long_t *l, long i)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_set(v, i);
+}
+
+static inline void atomic_long_inc(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_inc(v);
+}
+
+static inline void atomic_long_dec(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_dec(v);
+}
+
+static inline void atomic_long_add(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_add(i, v);
+}
+
+static inline void atomic_long_sub(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_sub(i, v);
+}
+
+#endif
+#endif
--- /dev/null
+#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_HWEIGHT_H_
+
+#include <arch/types.h>
+
+extern unsigned int hweight32(unsigned int w);
+extern unsigned int hweight16(unsigned int w);
+extern unsigned int hweight8(unsigned int w);
+extern unsigned long hweight64(__u64 w);
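+
+/*
+ * A minimal portable sketch of the 32-bit case (illustrative only; the
+ * real implementations are provided by the library sources):
+ *
+ *	unsigned int hweight32(unsigned int w)
+ *	{
+ *		w = w - ((w >> 1) & 0x55555555);
+ *		w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
+ *		w = (w + (w >> 4)) & 0x0f0f0f0f;
+ *		return (w * 0x01010101) >> 24;
+ *	}
+ */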
+
+#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
--- /dev/null
+#ifndef _ARCH_GENERIC_BUG_H
+#define _ARCH_GENERIC_BUG_H
+
+#include <lwk/compiler.h>
+
+#ifndef HAVE_ARCH_BUG
+#define BUG() do { \
+ printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __FUNCTION__); \
+ panic("BUG!"); \
+} while (0)
+#endif
+
+#ifndef HAVE_ARCH_BUG_ON
+#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
+#endif
+
+#ifndef HAVE_ARCH_ASSERT
+#define ASSERT(condition) do { if (unlikely((condition)!=1)) BUG(); } while(0)
+#endif
+
+#ifndef HAVE_ARCH_WARN_ON
+#define WARN_ON(condition) do { \
+ if (unlikely((condition)!=0)) { \
+ printk("BUG: warning at %s:%d/%s()\n", __FILE__, __LINE__, __FUNCTION__); \
+ /* TODO FIX ME */ \
+ /* dump_stack(); */ \
+ } \
+} while (0)
+#endif
+
+#endif
--- /dev/null
+#ifndef _ARCH_GENERIC_DIV64_H
+#define _ARCH_GENERIC_DIV64_H
+/*
+ * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
+ * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
+ *
+ * The semantics of do_div() are:
+ *
+ * uint32_t do_div(uint64_t *n, uint32_t base)
+ * {
+ * uint32_t remainder = *n % base;
+ * *n = *n / base;
+ * return remainder;
+ * }
+ *
+ * NOTE: macro parameter n is evaluated multiple times,
+ * beware of side effects!
+ */
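+
+/*
+ * Illustrative usage (a sketch, not part of the original header):
+ * convert a 64-bit nanosecond count to microseconds. do_div() modifies
+ * 'ns' in place and returns the remainder, so pass an lvalue:
+ *
+ *	uint64_t ns = 1234567890ULL;
+ *	uint32_t rem = do_div(ns, 1000);	(ns now holds microseconds)
+ */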
+
+#include <lwk/types.h>
+#include <lwk/compiler.h>
+
+#if BITS_PER_LONG == 64
+
+# define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ __rem = ((uint64_t)(n)) % __base; \
+ (n) = ((uint64_t)(n)) / __base; \
+ __rem; \
+ })
+
+#elif BITS_PER_LONG == 32
+
+extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
+
+/* The unnecessary pointer compare is there
+ * to check for type safety (n must be 64bit)
+ */
+# define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ (void)(((typeof((n)) *)0) == ((uint64_t *)0)); \
+ if (likely(((n) >> 32) == 0)) { \
+ __rem = (uint32_t)(n) % __base; \
+ (n) = (uint32_t)(n) / __base; \
+ } else \
+ __rem = __div64_32(&(n), __base); \
+ __rem; \
+ })
+
+#else /* BITS_PER_LONG == ?? */
+
+# error do_div() does not yet support the C64
+
+#endif /* BITS_PER_LONG */
+
+#endif /* _ARCH_GENERIC_DIV64_H */
--- /dev/null
+#ifndef _ARCH_GENERIC_ERRNO_BASE_H
+#define _ARCH_GENERIC_ERRNO_BASE_H
+
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* I/O error */
+#define ENXIO 6 /* No such device or address */
+#define E2BIG 7 /* Argument list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file number */
+#define ECHILD 10 /* No child processes */
+#define EAGAIN 11 /* Try again */
+#define ENOMEM 12 /* Out of memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#define ENOTBLK 15 /* Block device required */
+#define EBUSY 16 /* Device or resource busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* No such device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* File table overflow */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Not a typewriter */
+#define ETXTBSY 26 /* Text file busy */
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only file system */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+#define EDOM 33 /* Math argument out of domain of func */
+#define ERANGE 34 /* Math result not representable */
+
+#endif
--- /dev/null
+#ifndef _ARCH_GENERIC_ERRNO_H
+#define _ARCH_GENERIC_ERRNO_H
+
+#include <arch-generic/errno-base.h>
+
+#define EDEADLK 35 /* Resource deadlock would occur */
+#define ENAMETOOLONG 36 /* File name too long */
+#define ENOLCK 37 /* No record locks available */
+#define ENOSYS 38 /* Function not implemented */
+#define ENOTEMPTY 39 /* Directory not empty */
+#define ELOOP 40 /* Too many symbolic links encountered */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define ENOMSG 42 /* No message of desired type */
+#define EIDRM 43 /* Identifier removed */
+#define ECHRNG 44 /* Channel number out of range */
+#define EL2NSYNC 45 /* Level 2 not synchronized */
+#define EL3HLT 46 /* Level 3 halted */
+#define EL3RST 47 /* Level 3 reset */
+#define ELNRNG 48 /* Link number out of range */
+#define EUNATCH 49 /* Protocol driver not attached */
+#define ENOCSI 50 /* No CSI structure available */
+#define EL2HLT 51 /* Level 2 halted */
+#define EBADE 52 /* Invalid exchange */
+#define EBADR 53 /* Invalid request descriptor */
+#define EXFULL 54 /* Exchange full */
+#define ENOANO 55 /* No anode */
+#define EBADRQC 56 /* Invalid request code */
+#define EBADSLT 57 /* Invalid slot */
+
+#define EDEADLOCK EDEADLK
+
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EMULTIHOP 72 /* Multihop attempted */
+#define EDOTDOT 73 /* RFS specific error */
+#define EBADMSG 74 /* Not a data message */
+#define EOVERFLOW 75 /* Value too large for defined data type */
+#define ENOTUNIQ 76 /* Name not unique on network */
+#define EBADFD 77 /* File descriptor in bad state */
+#define EREMCHG 78 /* Remote address changed */
+#define ELIBACC 79 /* Can not access a needed shared library */
+#define ELIBBAD 80 /* Accessing a corrupted shared library */
+#define ELIBSCN 81 /* .lib section in a.out corrupted */
+#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 83 /* Cannot exec a shared library directly */
+#define EILSEQ 84 /* Illegal byte sequence */
+#define ERESTART 85 /* Interrupted system call should be restarted */
+#define ESTRPIPE 86 /* Streams pipe error */
+#define EUSERS 87 /* Too many users */
+#define ENOTSOCK 88 /* Socket operation on non-socket */
+#define EDESTADDRREQ 89 /* Destination address required */
+#define EMSGSIZE 90 /* Message too long */
+#define EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 92 /* Protocol not available */
+#define EPROTONOSUPPORT 93 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 96 /* Protocol family not supported */
+#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
+#define EADDRINUSE 98 /* Address already in use */
+#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define ENETDOWN 100 /* Network is down */
+#define ENETUNREACH 101 /* Network is unreachable */
+#define ENETRESET 102 /* Network dropped connection because of reset */
+#define ECONNABORTED 103 /* Software caused connection abort */
+#define ECONNRESET 104 /* Connection reset by peer */
+#define ENOBUFS 105 /* No buffer space available */
+#define EISCONN 106 /* Transport endpoint is already connected */
+#define ENOTCONN 107 /* Transport endpoint is not connected */
+#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define ETIMEDOUT 110 /* Connection timed out */
+#define ECONNREFUSED 111 /* Connection refused */
+#define EHOSTDOWN 112 /* Host is down */
+#define EHOSTUNREACH 113 /* No route to host */
+#define EALREADY 114 /* Operation already in progress */
+#define EINPROGRESS 115 /* Operation now in progress */
+#define ESTALE 116 /* Stale NFS file handle */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EDQUOT 122 /* Quota exceeded */
+
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
+#define ECANCELED 125 /* Operation Canceled */
+#define ENOKEY 126 /* Required key not available */
+#define EKEYEXPIRED 127 /* Key has expired */
+#define EKEYREVOKED 128 /* Key has been revoked */
+#define EKEYREJECTED 129 /* Key was rejected by service */
+
+/* for robust mutexes */
+#define EOWNERDEAD 130 /* Owner died */
+#define ENOTRECOVERABLE 131 /* State not recoverable */
+
+#endif
--- /dev/null
+#ifndef __GENERIC_IO_H
+#define __GENERIC_IO_H
+
+#include <lwk/linkage.h>
+#include <arch/byteorder.h>
+
+/*
+ * These are the "generic" interfaces for doing new-style
+ * memory-mapped or PIO accesses. Architectures may do
+ * their own arch-optimized versions, these just act as
+ * wrappers around the old-style IO register access functions:
+ * read[bwl]/write[bwl]/in[bwl]/out[bwl]
+ *
+ * Don't include this directly, include it from <asm/io.h>.
+ */
+
+/*
+ * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
+ * access or a MMIO access, these functions don't care. The info is
+ * encoded in the hardware mapping set up by the mapping functions
+ * (or the cookie itself, depending on implementation and hw).
+ *
+ * The generic routines just encode the PIO/MMIO as part of the
+ * cookie, and coldly assume that the MMIO IO mappings are not
+ * in the low address range. Architectures for which this is not
+ * true can't use this generic implementation.
+ */
+extern unsigned int fastcall ioread8(void __iomem *);
+extern unsigned int fastcall ioread16(void __iomem *);
+extern unsigned int fastcall ioread16be(void __iomem *);
+extern unsigned int fastcall ioread32(void __iomem *);
+extern unsigned int fastcall ioread32be(void __iomem *);
+
+extern void fastcall iowrite8(u8, void __iomem *);
+extern void fastcall iowrite16(u16, void __iomem *);
+extern void fastcall iowrite16be(u16, void __iomem *);
+extern void fastcall iowrite32(u32, void __iomem *);
+extern void fastcall iowrite32be(u32, void __iomem *);
+
+/*
+ * "string" versions of the above. Note that they
+ * use native byte ordering for the accesses (on
+ * the assumption that IO and memory agree on a
+ * byte order, and CPU byteorder is irrelevant).
+ *
+ * They do _not_ update the port address. If you
+ * want MMIO that copies stuff laid out in MMIO
+ * memory across multiple ports, use "memcpy_toio()"
+ * and friends.
+ */
+extern void fastcall ioread8_rep(void __iomem *port, void *buf, unsigned long count);
+extern void fastcall ioread16_rep(void __iomem *port, void *buf, unsigned long count);
+extern void fastcall ioread32_rep(void __iomem *port, void *buf, unsigned long count);
+
+extern void fastcall iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void fastcall iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void fastcall iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
+
+/* Create a virtual mapping cookie for an IO port range */
+extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+extern void ioport_unmap(void __iomem *);
+
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+struct pci_dev;
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
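+
+/*
+ * Illustrative usage (a sketch with a hypothetical device and register
+ * offset, not part of the original header): map BAR 0 of a PCI device
+ * and access a 32-bit register at offset 0x10.
+ *
+ *	void __iomem *regs = pci_iomap(pdev, 0, 0);
+ *	if (regs) {
+ *		u32 val = ioread32(regs + 0x10);
+ *		iowrite32(val | 0x1, regs + 0x10);
+ *		pci_iounmap(pdev, regs);
+ *	}
+ */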
+
+#endif
--- /dev/null
+#ifndef _ASM_GENERIC_MMAN_H
+#define _ASM_GENERIC_MMAN_H
+
+/*
+ Author: Michael S. Tsirkin <mst@mellanox.co.il>, Mellanox Technologies Ltd.
+ Based on: asm-xxx/mman.h
+*/
+
+#define PROT_READ 0x1 /* page can be read */
+#define PROT_WRITE 0x2 /* page can be written */
+#define PROT_EXEC 0x4 /* page can be executed */
+#define PROT_SEM 0x8 /* page may be used for atomic ops */
+#define PROT_NONE 0x0 /* page can not be accessed */
+#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
+#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
+
+#define MAP_SHARED 0x01 /* Share changes */
+#define MAP_PRIVATE 0x02 /* Changes are private */
+#define MAP_TYPE 0x0f /* Mask for type of mapping */
+#define MAP_FIXED 0x10 /* Interpret addr exactly */
+#define MAP_ANONYMOUS 0x20 /* don't use a file */
+
+#define MS_ASYNC 1 /* sync memory asynchronously */
+#define MS_INVALIDATE 2 /* invalidate the caches */
+#define MS_SYNC 4 /* synchronous memory sync */
+
+#define MADV_NORMAL 0 /* no further special treatment */
+#define MADV_RANDOM 1 /* expect random page references */
+#define MADV_SEQUENTIAL 2 /* expect sequential page references */
+#define MADV_WILLNEED 3 /* will need these pages */
+#define MADV_DONTNEED 4 /* don't need these pages */
+
+/* common parameters: try to keep these consistent across architectures */
+#define MADV_REMOVE 9 /* remove these pages & resources */
+#define MADV_DONTFORK 10 /* don't inherit across fork */
+#define MADV_DOFORK 11 /* do inherit across fork */
+
+/* compatibility flags */
+#define MAP_FILE 0
+
+#endif
--- /dev/null
+#ifndef _ASM_GENERIC_PGTABLE_H
+#define _ASM_GENERIC_PGTABLE_H
+
+#ifndef __HAVE_ARCH_PTEP_ESTABLISH
+/*
+ * Establish a new mapping:
+ * - flush the old one
+ * - update the page tables
+ * - inform the TLB about the new one
+ *
+ * We hold the mm semaphore for reading, and the pte lock.
+ *
+ * Note: the old pte is known to not be writable, so we don't need to
+ * worry about dirty bits etc getting lost.
+ */
+#ifndef __HAVE_ARCH_SET_PTE_ATOMIC
+#define ptep_establish(__vma, __address, __ptep, __entry) \
+do { \
+ set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+} while (0)
+#else /* __HAVE_ARCH_SET_PTE_ATOMIC */
+#define ptep_establish(__vma, __address, __ptep, __entry) \
+do { \
+ set_pte_atomic(__ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+} while (0)
+#endif /* __HAVE_ARCH_SET_PTE_ATOMIC */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+/*
+ * Largely same as above, but only sets the access flags (dirty,
+ * accessed, and writable). Furthermore, we know it always gets set
+ * to a "more permissive" setting, which allows most architectures
+ * to optimize this.
+ */
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+do { \
+ set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+} while (0)
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define ptep_test_and_clear_young(__vma, __address, __ptep) \
+({ \
+ pte_t __pte = *(__ptep); \
+ int r = 1; \
+ if (!pte_young(__pte)) \
+ r = 0; \
+ else \
+ set_pte_at((__vma)->vm_mm, (__address), \
+ (__ptep), pte_mkold(__pte)); \
+ r; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(__vma, __address, __ptep) \
+({ \
+ int __young; \
+ __young = ptep_test_and_clear_young(__vma, __address, __ptep); \
+ if (__young) \
+ flush_tlb_page(__vma, __address); \
+ __young; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+#define ptep_test_and_clear_dirty(__vma, __address, __ptep) \
+({ \
+ pte_t __pte = *__ptep; \
+ int r = 1; \
+ if (!pte_dirty(__pte)) \
+ r = 0; \
+ else \
+ set_pte_at((__vma)->vm_mm, (__address), (__ptep), \
+ pte_mkclean(__pte)); \
+ r; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
+#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
+({ \
+ int __dirty; \
+ __dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep); \
+ if (__dirty) \
+ flush_tlb_page(__vma, __address); \
+ __dirty; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define ptep_get_and_clear(__mm, __address, __ptep) \
+({ \
+ pte_t __pte = *(__ptep); \
+ pte_clear((__mm), (__address), (__ptep)); \
+ __pte; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+#define ptep_get_and_clear_full(__mm, __address, __ptep, __full) \
+({ \
+ pte_t __pte; \
+ __pte = ptep_get_and_clear((__mm), (__address), (__ptep)); \
+ __pte; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTE_CLEAR_FULL
+#define pte_clear_full(__mm, __address, __ptep, __full) \
+do { \
+ pte_clear((__mm), (__address), (__ptep)); \
+} while (0)
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
+#define ptep_clear_flush(__vma, __address, __ptep) \
+({ \
+ pte_t __pte; \
+ __pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep); \
+ flush_tlb_page(__vma, __address); \
+ __pte; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
+struct mm_struct;
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
+{
+ pte_t old_pte = *ptep;
+ set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B) (pte_val(A) == pte_val(B))
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
+#define page_test_and_clear_dirty(page) (0)
+#define pte_maybe_dirty(pte) pte_dirty(pte)
+#else
+#define pte_maybe_dirty(pte) (1)
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
+#define page_test_and_clear_young(page) (0)
+#endif
+
+#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
+#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
+#endif
+
+#ifndef __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
+#define lazy_mmu_prot_update(pte) do { } while (0)
+#endif
+
+#ifndef __HAVE_ARCH_MOVE_PTE
+#define move_pte(pte, prot, old_addr, new_addr) (pte)
+#endif
+
+/*
+ * When walking page tables, get the address of the next boundary,
+ * or the end address of the range if that comes earlier. Although no
+ * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
+ */
+
+#define pgd_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+
+#ifndef pud_addr_end
+#define pud_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+#ifndef pmd_addr_end
+#define pmd_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+#ifndef __ASSEMBLY__
+/*
+ * When walking page tables, we usually want to skip any p?d_none entries;
+ * and any p?d_bad entries - reporting the error before resetting to none.
+ * Do the tests inline, but report and clear the bad entry in mm/memory.c.
+ */
+void pgd_clear_bad(pgd_t *);
+void pud_clear_bad(pud_t *);
+void pmd_clear_bad(pmd_t *);
+
+static inline int pgd_none_or_clear_bad(pgd_t *pgd)
+{
+ if (pgd_none(*pgd))
+ return 1;
+ if (unlikely(pgd_bad(*pgd))) {
+ pgd_clear_bad(pgd);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int pud_none_or_clear_bad(pud_t *pud)
+{
+ if (pud_none(*pud))
+ return 1;
+ if (unlikely(pud_bad(*pud))) {
+ pud_clear_bad(pud);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int pmd_none_or_clear_bad(pmd_t *pmd)
+{
+ if (pmd_none(*pmd))
+ return 1;
+ if (unlikely(pmd_bad(*pmd))) {
+ pmd_clear_bad(pmd);
+ return 1;
+ }
+ return 0;
+}
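+
+/*
+ * Illustrative sketch (not part of this header) of how the *_addr_end()
+ * helpers and the *_none_or_clear_bad() checks are typically combined when
+ * walking a range of page tables; walk_pud() is a hypothetical callee:
+ *
+ *	pgd_t *pgd = pgd_offset(mm, addr);
+ *	do {
+ *		unsigned long next = pgd_addr_end(addr, end);
+ *		if (pgd_none_or_clear_bad(pgd))
+ *			continue;
+ *		walk_pud(pgd, addr, next);
+ *	} while (pgd++, addr = next, addr != end);
+ */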
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_GENERIC_PGTABLE_H */
--- /dev/null
+#ifndef _ARCH_GENERIC_SECTIONS_H_
+#define _ARCH_GENERIC_SECTIONS_H_
+
+/* References to section boundaries */
+
+extern char _text[], _stext[], _etext[];
+extern char _data[], _sdata[], _edata[];
+extern char __bss_start[], __bss_stop[];
+extern char __init_begin[], __init_end[];
+extern char _sinittext[], _einittext[];
+extern char _sextratext[] __attribute__((weak));
+extern char _eextratext[] __attribute__((weak));
+extern char _end[];
+extern char __per_cpu_start[], __per_cpu_end[];
+extern char __kprobes_text_start[], __kprobes_text_end[];
+extern char __initdata_begin[], __initdata_end[];
+
+#endif /* _ARCH_GENERIC_SECTIONS_H_ */
--- /dev/null
+#ifndef _ASM_GENERIC_SIGINFO_H
+#define _ASM_GENERIC_SIGINFO_H
+
+#include <lwk/compiler.h>
+#include <lwk/types.h>
+
+typedef union sigval {
+ int sival_int;
+ void __user *sival_ptr;
+} sigval_t;
+
+/*
+ * This is the size (including padding) of the part of the
+ * struct siginfo that is before the union.
+ */
+#ifndef __ARCH_SI_PREAMBLE_SIZE
+#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int))
+#endif
+
+#define SI_MAX_SIZE 128
+#ifndef SI_PAD_SIZE
+#define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int))
+#endif
+
+#ifndef __ARCH_SI_UID_T
+#define __ARCH_SI_UID_T uid_t
+#endif
+
+/*
+ * The default "si_band" type is "long", as specified by POSIX.
+ * However, some architectures want to override this to "int"
+ * for historical compatibility reasons, so we allow that.
+ */
+#ifndef __ARCH_SI_BAND_T
+#define __ARCH_SI_BAND_T long
+#endif
+
+#ifndef HAVE_ARCH_SIGINFO_T
+
+typedef struct siginfo {
+ int si_signo;
+ int si_errno;
+ int si_code;
+
+ union {
+ int _pad[SI_PAD_SIZE];
+
+ /* kill() */
+ struct {
+ pid_t _pid; /* sender's pid */
+ __ARCH_SI_UID_T _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
+ sigval_t _sigval; /* same as below */
+ int _sys_private; /* not to be passed to user */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ pid_t _pid; /* sender's pid */
+ __ARCH_SI_UID_T _uid; /* sender's uid */
+ sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ pid_t _pid; /* which child */
+ __ARCH_SI_UID_T _uid; /* sender's uid */
+ int _status; /* exit code */
+ clock_t _utime;
+ clock_t _stime;
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ struct {
+ void __user *_addr; /* faulting insn/memory ref. */
+#ifdef __ARCH_SI_TRAPNO
+ int _trapno; /* TRAP # which caused the signal */
+#endif
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+ } _sifields;
+} siginfo_t;
+
+#endif
+
+/*
+ * How these fields are to be accessed.
+ */
+#define si_pid _sifields._kill._pid
+#define si_uid _sifields._kill._uid
+#define si_tid _sifields._timer._tid
+#define si_overrun _sifields._timer._overrun
+#define si_sys_private _sifields._timer._sys_private
+#define si_status _sifields._sigchld._status
+#define si_utime _sifields._sigchld._utime
+#define si_stime _sifields._sigchld._stime
+#define si_value _sifields._rt._sigval
+#define si_int _sifields._rt._sigval.sival_int
+#define si_ptr _sifields._rt._sigval.sival_ptr
+#define si_addr _sifields._sigfault._addr
+#ifdef __ARCH_SI_TRAPNO
+#define si_trapno _sifields._sigfault._trapno
+#endif
+#define si_band _sifields._sigpoll._band
+#define si_fd _sifields._sigpoll._fd
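+
+/*
+ * Illustrative use (not part of this header): a SIGCHLD handler reads
+ * info->si_pid and info->si_status through these accessors rather than
+ * reaching into _sifields directly.
+ */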
+
+#ifdef __KERNEL__
+#define __SI_MASK 0xffff0000u
+#define __SI_KILL (0 << 16)
+#define __SI_TIMER (1 << 16)
+#define __SI_POLL (2 << 16)
+#define __SI_FAULT (3 << 16)
+#define __SI_CHLD (4 << 16)
+#define __SI_RT (5 << 16)
+#define __SI_MESGQ (6 << 16)
+#define __SI_CODE(T,N) ((T) | ((N) & 0xffff))
+#else
+#define __SI_KILL 0
+#define __SI_TIMER 0
+#define __SI_POLL 0
+#define __SI_FAULT 0
+#define __SI_CHLD 0
+#define __SI_RT 0
+#define __SI_MESGQ 0
+#define __SI_CODE(T,N) (N)
+#endif
+
+/*
+ * si_code values
+ * Digital reserves positive values for kernel-generated signals.
+ */
+#define SI_USER 0 /* sent by kill, sigsend, raise */
+#define SI_KERNEL 0x80 /* sent by the kernel from somewhere */
+#define SI_QUEUE -1 /* sent by sigqueue */
+#define SI_TIMER __SI_CODE(__SI_TIMER,-2) /* sent by timer expiration */
+#define SI_MESGQ __SI_CODE(__SI_MESGQ,-3) /* sent by real time mesq state change */
+#define SI_ASYNCIO -4 /* sent by AIO completion */
+#define SI_SIGIO -5 /* sent by queued SIGIO */
+#define SI_TKILL -6 /* sent by tkill system call */
+#define SI_DETHREAD -7 /* sent by execve() killing subsidiary threads */
+
+#define SI_FROMUSER(siptr) ((siptr)->si_code <= 0)
+#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0)
+
+/*
+ * SIGILL si_codes
+ */
+#define ILL_ILLOPC (__SI_FAULT|1) /* illegal opcode */
+#define ILL_ILLOPN (__SI_FAULT|2) /* illegal operand */
+#define ILL_ILLADR (__SI_FAULT|3) /* illegal addressing mode */
+#define ILL_ILLTRP (__SI_FAULT|4) /* illegal trap */
+#define ILL_PRVOPC (__SI_FAULT|5) /* privileged opcode */
+#define ILL_PRVREG (__SI_FAULT|6) /* privileged register */
+#define ILL_COPROC (__SI_FAULT|7) /* coprocessor error */
+#define ILL_BADSTK (__SI_FAULT|8) /* internal stack error */
+#define NSIGILL 8
+
+/*
+ * SIGFPE si_codes
+ */
+#define FPE_INTDIV (__SI_FAULT|1) /* integer divide by zero */
+#define FPE_INTOVF (__SI_FAULT|2) /* integer overflow */
+#define FPE_FLTDIV (__SI_FAULT|3) /* floating point divide by zero */
+#define FPE_FLTOVF (__SI_FAULT|4) /* floating point overflow */
+#define FPE_FLTUND (__SI_FAULT|5) /* floating point underflow */
+#define FPE_FLTRES (__SI_FAULT|6) /* floating point inexact result */
+#define FPE_FLTINV (__SI_FAULT|7) /* floating point invalid operation */
+#define FPE_FLTSUB (__SI_FAULT|8) /* subscript out of range */
+#define NSIGFPE 8
+
+/*
+ * SIGSEGV si_codes
+ */
+#define SEGV_MAPERR (__SI_FAULT|1) /* address not mapped to object */
+#define SEGV_ACCERR (__SI_FAULT|2) /* invalid permissions for mapped object */
+#define NSIGSEGV 2
+
+/*
+ * SIGBUS si_codes
+ */
+#define BUS_ADRALN (__SI_FAULT|1) /* invalid address alignment */
+#define BUS_ADRERR	(__SI_FAULT|2)	/* non-existent physical address */
+#define BUS_OBJERR (__SI_FAULT|3) /* object specific hardware error */
+#define NSIGBUS 3
+
+/*
+ * SIGTRAP si_codes
+ */
+#define TRAP_BRKPT (__SI_FAULT|1) /* process breakpoint */
+#define TRAP_TRACE (__SI_FAULT|2) /* process trace trap */
+#define NSIGTRAP 2
+
+/*
+ * SIGCHLD si_codes
+ */
+#define CLD_EXITED (__SI_CHLD|1) /* child has exited */
+#define CLD_KILLED (__SI_CHLD|2) /* child was killed */
+#define CLD_DUMPED (__SI_CHLD|3) /* child terminated abnormally */
+#define CLD_TRAPPED (__SI_CHLD|4) /* traced child has trapped */
+#define CLD_STOPPED (__SI_CHLD|5) /* child has stopped */
+#define CLD_CONTINUED (__SI_CHLD|6) /* stopped child has continued */
+#define NSIGCHLD 6
+
+/*
+ * SIGPOLL si_codes
+ */
+#define POLL_IN (__SI_POLL|1) /* data input available */
+#define POLL_OUT (__SI_POLL|2) /* output buffers available */
+#define POLL_MSG (__SI_POLL|3) /* input message available */
+#define POLL_ERR (__SI_POLL|4) /* i/o error */
+#define POLL_PRI (__SI_POLL|5) /* high priority input available */
+#define POLL_HUP (__SI_POLL|6) /* device disconnected */
+#define NSIGPOLL 6
+
+/*
+ * sigevent definitions
+ *
+ * It seems likely that SIGEV_THREAD will have to be handled from
+ * userspace, libpthread transmuting it to SIGEV_SIGNAL, which the
+ * thread manager then catches and does the appropriate nonsense.
+ * However, everything is written out here so as to not get lost.
+ */
+#define SIGEV_SIGNAL 0 /* notify via signal */
+#define SIGEV_NONE 1 /* other notification: meaningless */
+#define SIGEV_THREAD 2 /* deliver via thread creation */
+#define SIGEV_THREAD_ID 4 /* deliver to thread */
+
+/*
+ * This works because the alignment is ok on all current architectures
+ * but we leave open this being overridden in the future
+ */
+#ifndef __ARCH_SIGEV_PREAMBLE_SIZE
+#define __ARCH_SIGEV_PREAMBLE_SIZE (sizeof(int) * 2 + sizeof(sigval_t))
+#endif
+
+#define SIGEV_MAX_SIZE 64
+#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE - __ARCH_SIGEV_PREAMBLE_SIZE) \
+ / sizeof(int))
+
+typedef struct sigevent {
+ sigval_t sigev_value;
+ int sigev_signo;
+ int sigev_notify;
+ union {
+ int _pad[SIGEV_PAD_SIZE];
+ int _tid;
+
+ struct {
+ void (*_function)(sigval_t);
+ void *_attribute; /* really pthread_attr_t */
+ } _sigev_thread;
+ } _sigev_un;
+} sigevent_t;
+
+#define sigev_notify_function _sigev_un._sigev_thread._function
+#define sigev_notify_attributes _sigev_un._sigev_thread._attribute
+#define sigev_notify_thread_id _sigev_un._tid
+
+#ifdef __KERNEL__
+
+struct siginfo;
+void do_schedule_next_timer(struct siginfo *info);
+
+#ifndef HAVE_ARCH_COPY_SIGINFO
+
+#include <lwk/string.h>
+
+static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
+{
+ if (from->si_code < 0)
+ memcpy(to, from, sizeof(*to));
+ else
+		/* _sigchld is currently the largest known union member */
+ memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
+}
+
+#endif
+
+extern int copy_siginfo_to_user(struct siginfo __user *to, struct siginfo *from);
+
+#endif /* __KERNEL__ */
+
+#endif
--- /dev/null
+#ifndef __ASM_GENERIC_SIGNAL_H
+#define __ASM_GENERIC_SIGNAL_H
+
+#include <lwk/compiler.h>
+
+#ifndef SIG_BLOCK
+#define SIG_BLOCK 0 /* for blocking signals */
+#endif
+#ifndef SIG_UNBLOCK
+#define SIG_UNBLOCK 1 /* for unblocking signals */
+#endif
+#ifndef SIG_SETMASK
+#define SIG_SETMASK 2 /* for setting the signal mask */
+#endif
+
+#ifndef __ASSEMBLY__
+typedef void __signalfn_t(int);
+typedef __signalfn_t __user *__sighandler_t;
+
+typedef void __restorefn_t(void);
+typedef __restorefn_t __user *__sigrestore_t;
+
+#define SIG_DFL ((__force __sighandler_t)0) /* default signal handling */
+#define SIG_IGN ((__force __sighandler_t)1) /* ignore signal */
+#define SIG_ERR ((__force __sighandler_t)-1) /* error return from signal */
+#endif
+
+#endif /* __ASM_GENERIC_SIGNAL_H */
--- /dev/null
+#ifndef LOAD_OFFSET
+#define LOAD_OFFSET 0
+#endif
+
+#ifndef VMLWK_SYMBOL
+#define VMLWK_SYMBOL(_sym_) _sym_
+#endif
+
+/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
+#define ALIGN_FUNCTION() . = ALIGN(8)
+
+/* .data section */
+#define DATA_DATA \
+ *(.data) \
+ *(.data.init.refok)
+
+#define RO_DATA(align) \
+ . = ALIGN((align)); \
+ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
+ VMLWK_SYMBOL(__start_rodata) = .; \
+ *(.rodata) *(.rodata.*) \
+ *(__vermagic) /* Kernel version magic */ \
+ } \
+ \
+ .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
+ *(.rodata1) \
+ } \
+ \
+ /* PCI quirks */ \
+ .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
+ VMLWK_SYMBOL(__start_pci_fixups_early) = .; \
+ *(.pci_fixup_early) \
+ VMLWK_SYMBOL(__end_pci_fixups_early) = .; \
+ VMLWK_SYMBOL(__start_pci_fixups_header) = .; \
+ *(.pci_fixup_header) \
+ VMLWK_SYMBOL(__end_pci_fixups_header) = .; \
+ VMLWK_SYMBOL(__start_pci_fixups_final) = .; \
+ *(.pci_fixup_final) \
+ VMLWK_SYMBOL(__end_pci_fixups_final) = .; \
+ VMLWK_SYMBOL(__start_pci_fixups_enable) = .; \
+ *(.pci_fixup_enable) \
+ VMLWK_SYMBOL(__end_pci_fixups_enable) = .; \
+ VMLWK_SYMBOL(__start_pci_fixups_resume) = .; \
+ *(.pci_fixup_resume) \
+ VMLWK_SYMBOL(__end_pci_fixups_resume) = .; \
+ } \
+ \
+ /* RapidIO route ops */ \
+ .rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \
+ VMLWK_SYMBOL(__start_rio_route_ops) = .; \
+ *(.rio_route_ops) \
+ VMLWK_SYMBOL(__end_rio_route_ops) = .; \
+ } \
+ \
+ /* Kernel symbol table: Normal symbols */ \
+ __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
+ VMLWK_SYMBOL(__start___ksymtab) = .; \
+ *(__ksymtab) \
+ VMLWK_SYMBOL(__stop___ksymtab) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only symbols */ \
+ __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
+ VMLWK_SYMBOL(__start___ksymtab_gpl) = .; \
+ *(__ksymtab_gpl) \
+ VMLWK_SYMBOL(__stop___ksymtab_gpl) = .; \
+ } \
+ \
+ /* Kernel symbol table: Normal unused symbols */ \
+ __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
+ VMLWK_SYMBOL(__start___ksymtab_unused) = .; \
+ *(__ksymtab_unused) \
+ VMLWK_SYMBOL(__stop___ksymtab_unused) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only unused symbols */ \
+ __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
+ VMLWK_SYMBOL(__start___ksymtab_unused_gpl) = .; \
+ *(__ksymtab_unused_gpl) \
+ VMLWK_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-future-only symbols */ \
+ __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
+ VMLWK_SYMBOL(__start___ksymtab_gpl_future) = .; \
+ *(__ksymtab_gpl_future) \
+ VMLWK_SYMBOL(__stop___ksymtab_gpl_future) = .; \
+ } \
+ \
+ /* Kernel symbol table: Normal symbols */ \
+ __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
+ VMLWK_SYMBOL(__start___kcrctab) = .; \
+ *(__kcrctab) \
+ VMLWK_SYMBOL(__stop___kcrctab) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only symbols */ \
+ __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
+ VMLWK_SYMBOL(__start___kcrctab_gpl) = .; \
+ *(__kcrctab_gpl) \
+ VMLWK_SYMBOL(__stop___kcrctab_gpl) = .; \
+ } \
+ \
+ /* Kernel symbol table: Normal unused symbols */ \
+ __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
+ VMLWK_SYMBOL(__start___kcrctab_unused) = .; \
+ *(__kcrctab_unused) \
+ VMLWK_SYMBOL(__stop___kcrctab_unused) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only unused symbols */ \
+ __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
+ VMLWK_SYMBOL(__start___kcrctab_unused_gpl) = .; \
+ *(__kcrctab_unused_gpl) \
+ VMLWK_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-future-only symbols */ \
+ __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
+ VMLWK_SYMBOL(__start___kcrctab_gpl_future) = .; \
+ *(__kcrctab_gpl_future) \
+ VMLWK_SYMBOL(__stop___kcrctab_gpl_future) = .; \
+ } \
+ \
+ /* Kernel symbol table: strings */ \
+ __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
+ *(__ksymtab_strings) \
+ } \
+ \
+ /* Built-in module parameters. */ \
+ __param : AT(ADDR(__param) - LOAD_OFFSET) { \
+ VMLWK_SYMBOL(__start___param) = .; \
+ *(__param) \
+ VMLWK_SYMBOL(__stop___param) = .; \
+ VMLWK_SYMBOL(__end_rodata) = .; \
+ } \
+ \
+ . = ALIGN((align));
+
+/* RODATA provided for backward compatibility.
+ * All archs are supposed to use RO_DATA() */
+#define RODATA RO_DATA(4096)
+
+#define SECURITY_INIT \
+ .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
+ VMLWK_SYMBOL(__security_initcall_start) = .; \
+ *(.security_initcall.init) \
+ VMLWK_SYMBOL(__security_initcall_end) = .; \
+ }
+
+/* .text section. Map to function alignment to avoid address changes
+ * during the second ld pass when generating System.map */
+#define TEXT_TEXT \
+ ALIGN_FUNCTION(); \
+ *(.text) \
+ *(.text.init.refok)
+
+/* sched.text is aligned to function alignment to ensure we have the same
+ * address even on the second ld pass when generating System.map */
+#define SCHED_TEXT \
+ ALIGN_FUNCTION(); \
+ VMLWK_SYMBOL(__sched_text_start) = .; \
+ *(.sched.text) \
+ VMLWK_SYMBOL(__sched_text_end) = .;
+
+/* spinlock.text is aligned to function alignment to ensure we have the same
+ * address even on the second ld pass when generating System.map */
+#define LOCK_TEXT \
+ ALIGN_FUNCTION(); \
+ VMLWK_SYMBOL(__lock_text_start) = .; \
+ *(.spinlock.text) \
+ VMLWK_SYMBOL(__lock_text_end) = .;
+
+#define KPROBES_TEXT \
+ ALIGN_FUNCTION(); \
+ VMLWK_SYMBOL(__kprobes_text_start) = .; \
+ *(.kprobes.text) \
+ VMLWK_SYMBOL(__kprobes_text_end) = .;
+
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to
+ the beginning of the section so we begin them at 0. */
+#define DWARF_DEBUG \
+ /* DWARF 1 */ \
+ .debug 0 : { *(.debug) } \
+ .line 0 : { *(.line) } \
+ /* GNU DWARF 1 extensions */ \
+ .debug_srcinfo 0 : { *(.debug_srcinfo) } \
+ .debug_sfnames 0 : { *(.debug_sfnames) } \
+ /* DWARF 1.1 and DWARF 2 */ \
+ .debug_aranges 0 : { *(.debug_aranges) } \
+ .debug_pubnames 0 : { *(.debug_pubnames) } \
+ /* DWARF 2 */ \
+ .debug_info 0 : { *(.debug_info \
+ .gnu.linkonce.wi.*) } \
+ .debug_abbrev 0 : { *(.debug_abbrev) } \
+ .debug_line 0 : { *(.debug_line) } \
+ .debug_frame 0 : { *(.debug_frame) } \
+ .debug_str 0 : { *(.debug_str) } \
+ .debug_loc 0 : { *(.debug_loc) } \
+ .debug_macinfo 0 : { *(.debug_macinfo) } \
+ /* SGI/MIPS DWARF 2 extensions */ \
+ .debug_weaknames 0 : { *(.debug_weaknames) } \
+ .debug_funcnames 0 : { *(.debug_funcnames) } \
+ .debug_typenames 0 : { *(.debug_typenames) } \
+ .debug_varnames 0 : { *(.debug_varnames) } \
+
+ /* Stabs debugging sections. */
+#define STABS_DEBUG \
+ .stab 0 : { *(.stab) } \
+ .stabstr 0 : { *(.stabstr) } \
+ .stab.excl 0 : { *(.stab.excl) } \
+ .stab.exclstr 0 : { *(.stab.exclstr) } \
+ .stab.index 0 : { *(.stab.index) } \
+ .stab.indexstr 0 : { *(.stab.indexstr) } \
+ .comment 0 : { *(.comment) }
+
+#define BUG_TABLE \
+ . = ALIGN(8); \
+ __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
+ __start___bug_table = .; \
+ *(__bug_table) \
+ __stop___bug_table = .; \
+ }
+
+#define NOTES \
+ .notes : { *(.note.*) } :note
+
+#define INITCALLS \
+ *(.initcall0.init) \
+ *(.initcall0s.init) \
+ *(.initcall1.init) \
+ *(.initcall1s.init) \
+ *(.initcall2.init) \
+ *(.initcall2s.init) \
+ *(.initcall3.init) \
+ *(.initcall3s.init) \
+ *(.initcall4.init) \
+ *(.initcall4s.init) \
+ *(.initcall5.init) \
+ *(.initcall5s.init) \
+ *(.initcallrootfs.init) \
+ *(.initcall6.init) \
+ *(.initcall6s.init) \
+ *(.initcall7.init) \
+ *(.initcall7s.init)
+
--- /dev/null
+/*
+ * asm-x86_64/acpi.h
+ *
+ * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#ifndef _ASM_ACPI_H
+#define _ASM_ACPI_H
+
+#ifdef __KERNEL__
+
+#include <acpi/pdc_intel.h>
+
+#define COMPILER_DEPENDENT_INT64 long long
+#define COMPILER_DEPENDENT_UINT64 unsigned long long
+
+/*
+ * Calling conventions:
+ *
+ * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
+ * ACPI_EXTERNAL_XFACE - External ACPI interfaces
+ * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
+ * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
+ */
+#define ACPI_SYSTEM_XFACE
+#define ACPI_EXTERNAL_XFACE
+#define ACPI_INTERNAL_XFACE
+#define ACPI_INTERNAL_VAR_XFACE
+
+/* Asm macros */
+
+#define ACPI_ASM_MACROS
+#define BREAKPOINT3
+#define ACPI_DISABLE_IRQS() local_irq_disable()
+#define ACPI_ENABLE_IRQS() local_irq_enable()
+#define ACPI_FLUSH_CPU_CACHE() wbinvd()
+
+
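+/*
+ * ACPI Global Lock helpers. The low two bits of the lock dword encode the
+ * "pending" and "owned" state shared with the firmware, per the Global Lock
+ * protocol in the ACPI specification; the ACPI_ACQUIRE/RELEASE macros below
+ * wrap these for the ACPI core.
+ */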
+static inline int
+__acpi_acquire_global_lock (unsigned int *lock)
+{
+ unsigned int old, new, val;
+ do {
+ old = *lock;
+ new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
+ val = cmpxchg(lock, old, new);
+ } while (unlikely (val != old));
+ return (new < 3) ? -1 : 0;
+}
+
+static inline int
+__acpi_release_global_lock (unsigned int *lock)
+{
+ unsigned int old, new, val;
+ do {
+ old = *lock;
+ new = old & ~0x3;
+ val = cmpxchg(lock, old, new);
+ } while (unlikely (val != old));
+ return old & 0x1;
+}
+
+#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \
+ ((Acq) = __acpi_acquire_global_lock((unsigned int *) GLptr))
+
+#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \
+ ((Acq) = __acpi_release_global_lock((unsigned int *) GLptr))
+
+/*
+ * Math helper asm macros
+ */
+#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
+ asm("divl %2;" \
+ :"=a"(q32), "=d"(r32) \
+ :"r"(d32), \
+ "0"(n_lo), "1"(n_hi))
+
+
+#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
+ asm("shrl $1,%2;" \
+ "rcrl $1,%3;" \
+ :"=r"(n_hi), "=r"(n_lo) \
+ :"0"(n_hi), "1"(n_lo))
+
+#ifdef CONFIG_ACPI
+extern int acpi_lapic;
+extern int acpi_ioapic;
+extern int acpi_noirq;
+extern int acpi_strict;
+extern int acpi_disabled;
+extern int acpi_pci_disabled;
+extern int acpi_ht;
+static inline void disable_acpi(void)
+{
+ acpi_disabled = 1;
+ acpi_ht = 0;
+ acpi_pci_disabled = 1;
+ acpi_noirq = 1;
+}
+
+/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
+#define FIX_ACPI_PAGES 4
+
+extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
+static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
+static inline void acpi_disable_pci(void)
+{
+ acpi_pci_disabled = 1;
+ acpi_noirq_set();
+}
+extern int acpi_irq_balance_set(char *str);
+
+#else /* !CONFIG_ACPI */
+
+#define acpi_lapic 0
+#define acpi_ioapic 0
+static inline void acpi_noirq_set(void) { }
+static inline void acpi_disable_pci(void) { }
+
+#endif /* !CONFIG_ACPI */
+
+extern int acpi_numa;
+extern int acpi_scan_nodes(unsigned long start, unsigned long end);
+#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
+
+#ifdef CONFIG_ACPI_SLEEP
+
+/* routines for saving/restoring kernel state */
+extern int acpi_save_state_mem(void);
+extern void acpi_restore_state_mem(void);
+
+extern unsigned long acpi_wakeup_address;
+
+/* early initialization routine */
+extern void acpi_reserve_bootmem(void);
+
+#endif /*CONFIG_ACPI_SLEEP*/
+
+#define boot_cpu_physical_apicid boot_cpu_id
+
+extern int acpi_disabled;
+extern int acpi_pci_disabled;
+
+extern u8 x86_acpiid_to_apicid[];
+
+extern int acpi_skip_timer_override;
+
+#endif /*__KERNEL__*/
+
+#endif /*_ASM_ACPI_H*/
--- /dev/null
+#ifndef __ASM_APIC_H
+#define __ASM_APIC_H
+
+#include <lwk/init.h>
+#include <arch/fixmap.h>
+#include <arch/apicdef.h>
+#include <arch/system.h>
+
+/*
+ * Debugging macros
+ */
+#define APIC_QUIET 0
+#define APIC_VERBOSE 1
+#define APIC_DEBUG 2
+
+extern int apic_verbosity;
+extern int apic_runs_main_timer;
+
+extern unsigned long lapic_phys_addr;
+
+/*
+ * Define the default level of output to be very little
+ * This can be turned up by using apic=verbose for more
+ * information and apic=debug for _lots_ of information.
+ * apic_verbosity is defined in apic.c
+ */
+#define apic_printk(v, s, a...) do { \
+ if ((v) <= apic_verbosity) \
+ printk(s, ##a); \
+ } while (0)
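+
+/*
+ * Example (illustrative only):
+ *	apic_printk(APIC_VERBOSE, "Local APIC mapped at phys 0x%lx\n",
+ *	            lapic_phys_addr);
+ */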
+
+struct pt_regs;
+
+/*
+ * Basic functions accessing APICs.
+ */
+
+static __inline void apic_write(unsigned long reg, uint32_t val)
+{
+ *((volatile uint32_t *)(APIC_BASE+reg)) = val;
+}
+
+static __inline uint32_t apic_read(unsigned long reg)
+{
+ return *((volatile uint32_t *)(APIC_BASE+reg));
+}
+
+static inline void lapic_ack_interrupt(void)
+{
+ /*
+ * This gets compiled to a single instruction:
+ * movl $0x0,0xffffffffffdfe0b0
+ *
+ * Docs say use 0 for future compatibility.
+ */
+ apic_write(APIC_EOI, 0);
+}
+
+extern void clear_local_APIC (void);
+extern void connect_bsp_APIC (void);
+extern void disconnect_bsp_APIC (int virt_wire_setup);
+extern void disable_local_APIC (void);
+extern int verify_local_APIC (void);
+extern void cache_APIC_registers (void);
+extern void sync_Arb_IDs (void);
+extern void init_bsp_APIC (void);
+extern void setup_local_APIC (void);
+extern void init_apic_mappings (void);
+extern void smp_local_timer_interrupt (struct pt_regs * regs);
+extern void setup_boot_APIC_clock (void);
+extern void setup_secondary_APIC_clock (void);
+extern void setup_apic_nmi_watchdog (void);
+extern int reserve_lapic_nmi(void);
+extern void release_lapic_nmi(void);
+extern void disable_timer_nmi_watchdog(void);
+extern void enable_timer_nmi_watchdog(void);
+extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
+extern int APIC_init_uniprocessor (void);
+extern void disable_APIC_timer(void);
+extern void enable_APIC_timer(void);
+extern void clustered_apic_check(void);
+
+extern void nmi_watchdog_default(void);
+extern int setup_nmi_watchdog(char *);
+
+extern unsigned int nmi_watchdog;
+#define NMI_DEFAULT -1
+#define NMI_NONE 0
+#define NMI_IO_APIC 1
+#define NMI_LOCAL_APIC 2
+#define NMI_INVALID 3
+
+extern int disable_timer_pin_1;
+
+extern void setup_threshold_lvt(unsigned long lvt_off);
+
+void smp_send_timer_broadcast_ipi(void);
+void switch_APIC_timer_to_ipi(void *cpumask);
+void switch_ipi_to_APIC_timer(void *cpumask);
+
+#define ARCH_APICTIMER_STOPS_ON_C3 1
+
+extern unsigned boot_cpu_id;
+
+extern void __init lapic_map(void);
+extern void __init lapic_init(void);
+extern void lapic_set_timer(uint32_t count);
+extern unsigned int lapic_calibrate_timer(void);
+extern void lapic_dump(void);
+extern void lapic_send_init_ipi(unsigned int cpu);
+extern void lapic_send_startup_ipi(unsigned int cpu, unsigned long start_rip);
+extern void lapic_send_ipi(unsigned int cpu, unsigned int vector);
+
+#endif /* __ASM_APIC_H */
--- /dev/null
+#ifndef __ASM_APICDEF_H
+#define __ASM_APICDEF_H
+
+/*
+ * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
+ *
+ * Alan Cox <Alan.Cox@linux.org>, 1995.
+ * Ingo Molnar <mingo@redhat.com>, 1999, 2000
+ */
+
+#define APIC_DEFAULT_PHYS_BASE 0xfee00000
+
+#define APIC_ID 0x20
+#define APIC_ID_MASK (0xFFu<<24)
+#define GET_APIC_ID(x) (((x)>>24)&0xFFu)
+#define SET_APIC_ID(x) (((x)<<24))
+#define APIC_LVR 0x30
+#define APIC_LVR_MASK 0xFF00FF
+#define GET_APIC_VERSION(x) ((x)&0xFFu)
+#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu)
+#define APIC_INTEGRATED(x) ((x)&0xF0u)
+#define APIC_TASKPRI 0x80
+#define APIC_TPRI_MASK 0xFFu
+#define APIC_ARBPRI 0x90
+#define APIC_ARBPRI_MASK 0xFFu
+#define APIC_PROCPRI 0xA0
+#define APIC_EOI 0xB0
+#define APIC_EIO_ACK 0x0 /* Write this to the EOI register */
+#define APIC_RRR 0xC0
+#define APIC_LDR 0xD0
+#define APIC_LDR_MASK (0xFFu<<24)
+#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFFu)
+#define SET_APIC_LOGICAL_ID(x) (((x)<<24))
+#define APIC_ALL_CPUS 0xFFu
+#define APIC_DFR 0xE0
+#define APIC_DFR_CLUSTER 0x0FFFFFFFul
+#define APIC_DFR_FLAT 0xFFFFFFFFul
+#define APIC_SPIV 0xF0
+#define APIC_SPIV_FOCUS_DISABLED (1<<9)
+#define APIC_SPIV_APIC_ENABLED (1<<8)
+#define APIC_ISR 0x100
+#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */
+#define APIC_TMR 0x180
+#define APIC_IRR 0x200
+#define APIC_ESR 0x280
+#define APIC_ESR_SEND_CS 0x00001
+#define APIC_ESR_RECV_CS 0x00002
+#define APIC_ESR_SEND_ACC 0x00004
+#define APIC_ESR_RECV_ACC 0x00008
+#define APIC_ESR_SENDILL 0x00020
+#define APIC_ESR_RECVILL 0x00040
+#define APIC_ESR_ILLREGA 0x00080
+#define APIC_ICR 0x300
+#define APIC_DEST_SELF 0x40000
+#define APIC_DEST_ALLINC 0x80000
+#define APIC_DEST_ALLBUT 0xC0000
+#define APIC_ICR_RR_MASK 0x30000
+#define APIC_ICR_RR_INVALID 0x00000
+#define APIC_ICR_RR_INPROG 0x10000
+#define APIC_ICR_RR_VALID 0x20000
+#define APIC_INT_LEVELTRIG 0x08000
+#define APIC_INT_ASSERT 0x04000
+#define APIC_ICR_BUSY 0x01000
+#define APIC_DEST_LOGICAL 0x00800
+#define APIC_DEST_PHYSICAL 0x00000
+#define APIC_DM_FIXED 0x00000
+#define APIC_DM_LOWEST 0x00100
+#define APIC_DM_SMI 0x00200
+#define APIC_DM_REMRD 0x00300
+#define APIC_DM_NMI 0x00400
+#define APIC_DM_INIT 0x00500
+#define APIC_DM_STARTUP 0x00600
+#define APIC_DM_EXTINT 0x00700
+#define APIC_VECTOR_MASK 0x000FF
+#define APIC_ICR2 0x310
+#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF)
+#define SET_APIC_DEST_FIELD(x) ((x)<<24)
+#define APIC_LVTT 0x320
+#define APIC_LVTTHMR 0x330
+#define APIC_LVTPC 0x340
+#define APIC_LVT0 0x350
+#define APIC_LVT_TIMER_BASE_MASK (0x3<<18)
+#define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3)
+#define SET_APIC_TIMER_BASE(x) (((x)<<18))
+#define APIC_TIMER_BASE_CLKIN 0x0
+#define APIC_TIMER_BASE_TMBASE 0x1
+#define APIC_TIMER_BASE_DIV 0x2
+#define APIC_LVT_TIMER_PERIODIC (1<<17)
+#define APIC_LVT_MASKED (1<<16)
+#define APIC_LVT_LEVEL_TRIGGER (1<<15)
+#define APIC_LVT_REMOTE_IRR (1<<14)
+#define APIC_INPUT_POLARITY (1<<13)
+#define APIC_SEND_PENDING (1<<12)
+#define APIC_MODE_MASK 0x700
+#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7)
+#define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8))
+#define APIC_MODE_FIXED 0x0
+#define APIC_MODE_NMI 0x4
+#define APIC_MODE_EXTINT 0x7
+#define APIC_LVT1 0x360
+#define APIC_LVTERR 0x370
+#define APIC_TMICT 0x380
+#define APIC_TMCCT 0x390
+#define APIC_TDCR 0x3E0
+#define APIC_TDR_DIV_TMBASE (1<<2)
+#define APIC_TDR_DIV_1 0xB
+#define APIC_TDR_DIV_2 0x0
+#define APIC_TDR_DIV_4 0x1
+#define APIC_TDR_DIV_8 0x2
+#define APIC_TDR_DIV_16 0x3
+#define APIC_TDR_DIV_32 0x8
+#define APIC_TDR_DIV_64 0x9
+#define APIC_TDR_DIV_128 0xA
+
+#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
+
+#define MAX_IO_APICS 16
+#define MAX_LOCAL_APIC 256
+
+/*
+ * All x86-64 systems are xAPIC compatible.
+ * In the following, "apicid" is a physical APIC ID.
+ */
+#define XAPIC_DEST_CPUS_SHIFT 4
+#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
+#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
+#define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
+#define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT)
+#define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK)
+#define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT)
+
+#define BAD_APICID 0xFFu
+
+#endif
--- /dev/null
+/* Copyright (c) 2007, Sandia National Laboratories */
+
+#ifndef _ARCH_X86_64_ASPACE_H
+#define _ARCH_X86_64_ASPACE_H
+
+#ifdef __KERNEL__
+#include <arch/page_table.h>
+
+struct arch_aspace {
+ xpte_t *pgd; /* Page global directory... root page table */
+};
+#endif
+
+#define SMARTMAP_ALIGN 0x8000000000UL /* Each PML4T entry covers 512 GB */
+
+#endif
--- /dev/null
+#ifndef _X86_64_ATOMIC_H
+#define _X86_64_ATOMIC_H
+
+/* atomic_t should be a 32 bit signed type */
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ */
+
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i) { (i) }
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+#define atomic_read(v) ((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+#define atomic_set(v,i) (((v)->counter) = (i))
+
+/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v.
+ */
+static __inline__ void atomic_add(int i, atomic_t *v)
+{
+ __asm__ __volatile__(
+ "lock ; addl %1,%0"
+ :"=m" (v->counter)
+ :"ir" (i), "m" (v->counter));
+}
+
+/**
+ * atomic_sub - subtract the atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v.
+ */
+static __inline__ void atomic_sub(int i, atomic_t *v)
+{
+ __asm__ __volatile__(
+ "lock ; subl %1,%0"
+ :"=m" (v->counter)
+ :"ir" (i), "m" (v->counter));
+}
+
+/**
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "lock ; subl %2,%0; sete %1"
+ :"=m" (v->counter), "=qm" (c)
+ :"ir" (i), "m" (v->counter) : "memory");
+ return c;
+}
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1.
+ */
+static __inline__ void atomic_inc(atomic_t *v)
+{
+ __asm__ __volatile__(
+ "lock ; incl %0"
+ :"=m" (v->counter)
+ :"m" (v->counter));
+}
+
+/**
+ * atomic_dec - decrement atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1.
+ */
+static __inline__ void atomic_dec(atomic_t *v)
+{
+ __asm__ __volatile__(
+ "lock ; decl %0"
+ :"=m" (v->counter)
+ :"m" (v->counter));
+}
+
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __inline__ int atomic_dec_and_test(atomic_t *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "lock ; decl %0; sete %1"
+ :"=m" (v->counter), "=qm" (c)
+ :"m" (v->counter) : "memory");
+ return c != 0;
+}
+
+/**
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __inline__ int atomic_inc_and_test(atomic_t *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "lock ; incl %0; sete %1"
+ :"=m" (v->counter), "=qm" (c)
+ :"m" (v->counter) : "memory");
+ return c != 0;
+}
+
+/**
+ * atomic_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static __inline__ int atomic_add_negative(int i, atomic_t *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "lock ; addl %2,%0; sets %1"
+ :"=m" (v->counter), "=qm" (c)
+ :"ir" (i), "m" (v->counter) : "memory");
+ return c;
+}
+
+/**
+ * atomic_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static __inline__ int atomic_add_return(int i, atomic_t *v)
+{
+ int __i = i;
+ __asm__ __volatile__(
+ "lock ; xaddl %0, %1;"
+ :"=r"(i)
+ :"m"(v->counter), "0"(i));
+ return i + __i;
+}
+
+static __inline__ int atomic_sub_return(int i, atomic_t *v)
+{
+ return atomic_add_return(-i,v);
+}
+
+#define atomic_inc_return(v) (atomic_add_return(1,v))
+#define atomic_dec_return(v) (atomic_sub_return(1,v))
+
+/* A 64-bit atomic type */
+
+typedef struct { volatile long counter; } atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+/**
+ * atomic64_read - read atomic64 variable
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically reads the value of @v.
+ * Doesn't imply a read memory barrier.
+ */
+#define atomic64_read(v) ((v)->counter)
+
+/**
+ * atomic64_set - set atomic64 variable
+ * @v: pointer to type atomic64_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+#define atomic64_set(v,i) (((v)->counter) = (i))
+
+/**
+ * atomic64_add - add integer to atomic64 variable
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically adds @i to @v.
+ */
+static __inline__ void atomic64_add(long i, atomic64_t *v)
+{
+ __asm__ __volatile__(
+ "lock ; addq %1,%0"
+ :"=m" (v->counter)
+ :"ir" (i), "m" (v->counter));
+}
+
+/**
+ * atomic64_sub - subtract the atomic64 variable
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically subtracts @i from @v.
+ */
+static __inline__ void atomic64_sub(long i, atomic64_t *v)
+{
+ __asm__ __volatile__(
+ "lock ; subq %1,%0"
+ :"=m" (v->counter)
+ :"ir" (i), "m" (v->counter));
+}
+
+/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "lock ; subq %2,%0; sete %1"
+ :"=m" (v->counter), "=qm" (c)
+ :"ir" (i), "m" (v->counter) : "memory");
+ return c;
+}
+
+/**
+ * atomic64_inc - increment atomic64 variable
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically increments @v by 1.
+ */
+static __inline__ void atomic64_inc(atomic64_t *v)
+{
+ __asm__ __volatile__(
+ "lock ; incq %0"
+ :"=m" (v->counter)
+ :"m" (v->counter));
+}
+
+/**
+ * atomic64_dec - decrement atomic64 variable
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically decrements @v by 1.
+ */
+static __inline__ void atomic64_dec(atomic64_t *v)
+{
+ __asm__ __volatile__(
+ "lock ; decq %0"
+ :"=m" (v->counter)
+ :"m" (v->counter));
+}
+
+/**
+ * atomic64_dec_and_test - decrement and test
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __inline__ int atomic64_dec_and_test(atomic64_t *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "lock ; decq %0; sete %1"
+ :"=m" (v->counter), "=qm" (c)
+ :"m" (v->counter) : "memory");
+ return c != 0;
+}
+
+/**
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __inline__ int atomic64_inc_and_test(atomic64_t *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "lock ; incq %0; sete %1"
+ :"=m" (v->counter), "=qm" (c)
+ :"m" (v->counter) : "memory");
+ return c != 0;
+}
+
+/**
+ * atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "lock ; addq %2,%0; sets %1"
+ :"=m" (v->counter), "=qm" (c)
+ :"ir" (i), "m" (v->counter) : "memory");
+ return c;
+}
+
+/**
+ * atomic64_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
+{
+ long __i = i;
+ __asm__ __volatile__(
+ "lock ; xaddq %0, %1;"
+ :"=r"(i)
+ :"m"(v->counter), "0"(i));
+ return i + __i;
+}
+
+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
+{
+ return atomic64_add_return(-i,v);
+}
+
+#define atomic64_inc_return(v) (atomic64_add_return(1,v))
+#define atomic64_dec_return(v) (atomic64_sub_return(1,v))
+
+#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ for (;;) { \
+ if (unlikely(c == (u))) \
+ break; \
+ old = atomic_cmpxchg((v), c, c + (a)); \
+ if (likely(old == c)) \
+ break; \
+ c = old; \
+ } \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
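+
+/*
+ * Illustrative use (not part of this header): atomic_inc_not_zero() is the
+ * usual way to take a reference only while an object is still live, e.g.
+ *
+ *	if (!atomic_inc_not_zero(&obj->refcount))
+ *		return NULL;	// object is already being torn down
+ */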
+
+/* These are x86-specific, used by some header files */
+#define atomic_clear_mask(mask, addr) \
+__asm__ __volatile__("lock ; andl %0,%1" \
+: : "r" (~(mask)),"m" (*addr) : "memory")
+
+#define atomic_set_mask(mask, addr) \
+__asm__ __volatile__("lock ; orl %0,%1" \
+: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")
+
+/* Atomic operations are already serializing on x86 */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+#include <arch-generic/atomic.h>
+#endif
--- /dev/null
+#ifndef __ASM_X86_64_AUXVEC_H
+#define __ASM_X86_64_AUXVEC_H
+
+#define AT_SYSINFO_EHDR 33
+
+#endif
--- /dev/null
+#ifndef _X86_64_BITOPS_H
+#define _X86_64_BITOPS_H
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ */
+
+#define ADDR (*(volatile long *) addr)
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static __inline__ void set_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__(
+ "lock ; btsl %1,%0"
+ :"+m" (ADDR)
+ :"dIr" (nr) : "memory");
+}
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void __set_bit(int nr, volatile void * addr)
+{
+ __asm__ volatile(
+ "btsl %1,%0"
+ :"+m" (ADDR)
+ :"dIr" (nr) : "memory");
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static __inline__ void clear_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__(
+ "lock ; btrl %1,%0"
+ :"+m" (ADDR)
+ :"dIr" (nr));
+}
+
+static __inline__ void __clear_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__(
+ "btrl %1,%0"
+ :"+m" (ADDR)
+ :"dIr" (nr));
+}
+
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void __change_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__(
+ "btcl %1,%0"
+ :"+m" (ADDR)
+ :"dIr" (nr));
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static __inline__ void change_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__(
+ "lock ; btcl %1,%0"
+ :"+m" (ADDR)
+ :"dIr" (nr));
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(
+ "lock ; btsl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"+m" (ADDR)
+ :"dIr" (nr) : "memory");
+ return oldbit;
+}
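+
+/*
+ * Illustrative use (not part of this header): test_and_set_bit() is a common
+ * building block for simple bit locks, e.g.
+ *
+ *	while (test_and_set_bit(0, &flags))
+ *		;	// spin until the previous owner clears bit 0
+ */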
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__(
+ "btsl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"+m" (ADDR)
+ :"dIr" (nr));
+ return oldbit;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(
+ "lock ; btrl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"+m" (ADDR)
+ :"dIr" (nr) : "memory");
+ return oldbit;
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__(
+ "btrl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"+m" (ADDR)
+ :"dIr" (nr));
+ return oldbit;
+}
+
+/* WARNING: non atomic and it can be reordered! */
+static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(
+ "btcl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"+m" (ADDR)
+ :"dIr" (nr) : "memory");
+ return oldbit;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(
+ "lock ; btcl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"+m" (ADDR)
+ :"dIr" (nr) : "memory");
+ return oldbit;
+}
+
+#if 0 /* Fool kernel-doc since it doesn't do macros yet */
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static int test_bit(int nr, const volatile void * addr);
+#endif
+
+static __inline__ int constant_test_bit(int nr, const volatile void * addr)
+{
+ return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int variable_test_bit(int nr, volatile const void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(
+ "btl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit)
+ :"m" (ADDR),"dIr" (nr));
+ return oldbit;
+}
+
+#define test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ constant_test_bit((nr),(addr)) : \
+ variable_test_bit((nr),(addr)))
+
+#undef ADDR
+
+extern long find_first_zero_bit(const unsigned long * addr, unsigned long size);
+extern long find_next_zero_bit (const unsigned long * addr, long size, long offset);
+extern long find_first_bit(const unsigned long * addr, unsigned long size);
+extern long find_next_bit(const unsigned long * addr, long size, long offset);
+
+/* return index of first bit set in val, or max when no bit is set */
+static inline unsigned long __scanbit(unsigned long val, unsigned long max)
+{
+ asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
+ return val;
+}
+
+#define find_first_bit(addr,size) \
+((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
+ (__scanbit(*(unsigned long *)addr,(size))) : \
+ find_first_bit(addr,size)))
+
+#define find_next_bit(addr,size,off) \
+((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
+ ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
+ find_next_bit(addr,size,off)))
+
+#define find_first_zero_bit(addr,size) \
+((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
+ (__scanbit(~*(unsigned long *)addr,(size))) : \
+ find_first_zero_bit(addr,size)))
+
+#define find_next_zero_bit(addr,size,off) \
+((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
+ ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
+ find_next_zero_bit(addr,size,off)))
+
+/*
+ * Find string of zero bits in a bitmap. -1 when not found.
+ */
+extern unsigned long
+find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len);
+
+static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
+ int len)
+{
+ unsigned long end = i + len;
+ while (i < end) {
+ __set_bit(i, bitmap);
+ i++;
+ }
+}
+
+static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i,
+ int len)
+{
+ unsigned long end = i + len;
+ while (i < end) {
+ __clear_bit(i, bitmap);
+ i++;
+ }
+}
+
+/**
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+static __inline__ unsigned long ffz(unsigned long word)
+{
+ __asm__("bsfq %1,%0"
+ :"=r" (word)
+ :"r" (~word));
+ return word;
+}
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __inline__ unsigned long __ffs(unsigned long word)
+{
+ __asm__("bsfq %1,%0"
+ :"=r" (word)
+ :"rm" (word));
+ return word;
+}
+
+/*
+ * __fls: find last bit set.
+ * @word: The word to search
+ *
+ * Undefined if no bit is set, so code should check against 0 first.
+ */
+static __inline__ unsigned long __fls(unsigned long word)
+{
+ __asm__("bsrq %1,%0"
+ :"=r" (word)
+ :"rm" (word));
+ return word;
+}
+
+#ifdef __KERNEL__
+
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+static __inline__ int ffs(int x)
+{
+ int r;
+
+ __asm__("bsfl %1,%0\n\t"
+ "cmovzl %2,%0"
+ : "=r" (r) : "rm" (x), "r" (-1));
+ return r+1;
+}
+
+/**
+ * fls64 - find last bit set in 64 bit word
+ * @x: the word to search
+ *
+ * This is defined the same way as fls.
+ */
+static __inline__ int fls64(__u64 x)
+{
+ if (x == 0)
+ return 0;
+ return __fls(x) + 1;
+}
+
+/**
+ * fls - find last bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ */
+static __inline__ int fls(int x)
+{
+ int r;
+
+ __asm__("bsrl %1,%0\n\t"
+ "cmovzl %2,%0"
+ : "=&r" (r) : "rm" (x), "rm" (-1));
+ return r+1;
+}
+
+#include <arch-generic/bitops/hweight.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* _X86_64_BITOPS_H */
--- /dev/null
+#ifndef _ARCH_BOOT_H
+#define _ARCH_BOOT_H
+
+/* Don't touch these, unless you really know what you're doing. */
+#define DEF_INITSEG 0x9000
+#define DEF_SYSSEG 0x1000
+#define DEF_SETUPSEG 0x9020
+#define DEF_SYSSIZE 0x7F00
+
+#endif
--- /dev/null
+
+#ifndef _X86_64_BOOTSETUP_H
+#define _X86_64_BOOTSETUP_H 1
+
+#define BOOT_PARAM_SIZE 4096
+extern char x86_boot_params[BOOT_PARAM_SIZE];
+
+/*
+ * This is set up by the setup-routine at boot-time
+ */
+#define PARAM ((unsigned char *)x86_boot_params)
+#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
+#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
+#define ALT_MEM_K (*(unsigned int *) (PARAM+0x1e0))
+#define E820_MAP_NR (*(char*) (PARAM+E820NR))
+#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
+#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
+#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
+#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
+#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
+#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
+#define SAVED_VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
+#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
+#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
+#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
+#define KERNEL_START (*(unsigned int *) (PARAM+0x214))
+#define INITRD_START (*(unsigned int *) (PARAM+0x218))
+#define INITRD_SIZE (*(unsigned int *) (PARAM+0x21c))
+#define EDID_INFO (*(struct edid_info *) (PARAM+0x140))
+#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
+#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
+#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
+#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
+#define COMMAND_LINE saved_command_line
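+
+/*
+ * Usage sketch (illustrative only): the accessors above simply overlay
+ * structures and scalars on the real-mode boot parameter block, e.g.:
+ *
+ *	if (LOADER_TYPE && INITRD_SIZE)
+ *		reserve_initrd_memory(INITRD_START, INITRD_SIZE);
+ *
+ * reserve_initrd_memory() is only a placeholder name for whatever memory
+ * reservation call the kernel actually makes.
+ */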
+
+#define RAMDISK_IMAGE_START_MASK 0x07FF
+#define RAMDISK_PROMPT_FLAG 0x8000
+#define RAMDISK_LOAD_FLAG 0x4000
+
+/* Defines needed to find the kernel boot command line... sigh. */
+#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
+#define OLD_CL_MAGIC_ADDR 0x90020
+#define OLD_CL_MAGIC 0xA33F
+#define OLD_CL_BASE_ADDR 0x90000
+#define OLD_CL_OFFSET 0x90022
+
+#endif
--- /dev/null
+#ifndef __ARCH_BUG_H
+#define __ARCH_BUG_H
+
+#include <lwk/stringify.h>
+
+/*
+ * Tell the user there is some problem. The exception handler decodes
+ * this frame.
+ */
+struct bug_frame {
+ unsigned char ud2[2];
+ unsigned char push;
+ signed int filename;
+ unsigned char ret;
+ unsigned short line;
+} __attribute__((packed));
+
+#define HAVE_ARCH_BUG
+/* We turn the bug frame into valid instructions so as not to confuse
+ the disassembler. Thanks to Jan Beulich & Suresh Siddha
+ for nice instruction selection.
+ The operands encode "pushq $filename ; ret $line", which the exception
+ handler decodes through struct bug_frame above. */
+#define BUG() \
+ asm volatile( \
+ "ud2 ; pushq $%c1 ; ret $%c0" :: \
+ "i"(__LINE__), "i" (__FILE__))
+void out_of_line_bug(void);
+
+#include <arch-generic/bug.h>
+#endif
--- /dev/null
+#ifndef _X86_64_BYTEORDER_H
+#define _X86_64_BYTEORDER_H
+
+#include <arch/types.h>
+#include <lwk/compiler.h>
+
+#ifdef __GNUC__
+
+static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
+{
+ __asm__("bswapq %0" : "=r" (x) : "0" (x));
+ return x;
+}
+
+static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
+{
+ __asm__("bswapl %0" : "=r" (x) : "0" (x));
+ return x;
+}
+
+/* Do not define swab16. Gcc is smart enough to recognize the "C" version and
+ convert it into a rotation or exchange. */
+
+#define __arch__swab32(x) ___arch__swab32(x)
+#define __arch__swab64(x) ___arch__swab64(x)
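+
+/*
+ * For example (illustrative): ___arch__swab32(0x12345678) returns 0x78563412
+ * and ___arch__swab64(0x0123456789abcdefULL) returns 0xefcdab8967452301ULL,
+ * i.e. a full byte reversal performed by the bswap instruction.
+ */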
+
+#endif /* __GNUC__ */
+
+#define __BYTEORDER_HAS_U64__
+
+#include <lwk/byteorder/little_endian.h>
+
+#endif /* _X86_64_BYTEORDER_H */
--- /dev/null
+/*
+ * include/arch-x86_64/cache.h
+ */
+#ifndef _ARCH_CACHE_H
+#define _ARCH_CACHE_H
+
+/* L1 cache line size */
+#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+/* Inter-node cache line size for NUMA systems */
+#define INTERNODE_CACHE_SHIFT (CONFIG_X86_INTERNODE_CACHE_SHIFT)
+#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
+
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
+#endif
--- /dev/null
+/*
+ * Some macros to handle stack frames in assembly.
+ */
+
+
+#define R15 0
+#define R14 8
+#define R13 16
+#define R12 24
+#define RBP 32
+#define RBX 40
+/* arguments: interrupts/non-tracing syscalls only save up to here */
+#define R11 48
+#define R10 56
+#define R9 64
+#define R8 72
+#define RAX 80
+#define RCX 88
+#define RDX 96
+#define RSI 104
+#define RDI 112
+#define ORIG_RAX 120 /* + error_code */
+/* end of arguments */
+/* cpu exception frame or undefined in case of fast syscall. */
+#define RIP 128
+#define CS 136
+#define EFLAGS 144
+#define RSP 152
+#define SS 160
+#define ARGOFFSET R11
+#define SWFRAME ORIG_RAX
+
+ .macro SAVE_ARGS addskip=0,norcx=0,nor891011=0
+ subq $9*8+\addskip,%rsp
+ CFI_ADJUST_CFA_OFFSET 9*8+\addskip
+ movq %rdi,8*8(%rsp)
+ CFI_REL_OFFSET rdi,8*8
+ movq %rsi,7*8(%rsp)
+ CFI_REL_OFFSET rsi,7*8
+ movq %rdx,6*8(%rsp)
+ CFI_REL_OFFSET rdx,6*8
+ .if \norcx
+ .else
+ movq %rcx,5*8(%rsp)
+ CFI_REL_OFFSET rcx,5*8
+ .endif
+ movq %rax,4*8(%rsp)
+ CFI_REL_OFFSET rax,4*8
+ .if \nor891011
+ .else
+ movq %r8,3*8(%rsp)
+ CFI_REL_OFFSET r8,3*8
+ movq %r9,2*8(%rsp)
+ CFI_REL_OFFSET r9,2*8
+ movq %r10,1*8(%rsp)
+ CFI_REL_OFFSET r10,1*8
+ movq %r11,(%rsp)
+ CFI_REL_OFFSET r11,0*8
+ .endif
+ .endm
+
+#define ARG_SKIP 9*8
+ .macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0
+ .if \skipr11
+ .else
+ movq (%rsp),%r11
+ CFI_RESTORE r11
+ .endif
+ .if \skipr8910
+ .else
+ movq 1*8(%rsp),%r10
+ CFI_RESTORE r10
+ movq 2*8(%rsp),%r9
+ CFI_RESTORE r9
+ movq 3*8(%rsp),%r8
+ CFI_RESTORE r8
+ .endif
+ .if \skiprax
+ .else
+ movq 4*8(%rsp),%rax
+ CFI_RESTORE rax
+ .endif
+ .if \skiprcx
+ .else
+ movq 5*8(%rsp),%rcx
+ CFI_RESTORE rcx
+ .endif
+ .if \skiprdx
+ .else
+ movq 6*8(%rsp),%rdx
+ CFI_RESTORE rdx
+ .endif
+ movq 7*8(%rsp),%rsi
+ CFI_RESTORE rsi
+ movq 8*8(%rsp),%rdi
+ CFI_RESTORE rdi
+ .if ARG_SKIP+\addskip > 0
+ addq $ARG_SKIP+\addskip,%rsp
+ CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
+ .endif
+ .endm
+
+ .macro LOAD_ARGS offset
+ movq \offset(%rsp),%r11
+ movq \offset+8(%rsp),%r10
+ movq \offset+16(%rsp),%r9
+ movq \offset+24(%rsp),%r8
+ movq \offset+40(%rsp),%rcx
+ movq \offset+48(%rsp),%rdx
+ movq \offset+56(%rsp),%rsi
+ movq \offset+64(%rsp),%rdi
+ movq \offset+72(%rsp),%rax
+ .endm
+
+#define REST_SKIP 6*8
+ .macro SAVE_REST
+ subq $REST_SKIP,%rsp
+ CFI_ADJUST_CFA_OFFSET REST_SKIP
+ movq %rbx,5*8(%rsp)
+ CFI_REL_OFFSET rbx,5*8
+ movq %rbp,4*8(%rsp)
+ CFI_REL_OFFSET rbp,4*8
+ movq %r12,3*8(%rsp)
+ CFI_REL_OFFSET r12,3*8
+ movq %r13,2*8(%rsp)
+ CFI_REL_OFFSET r13,2*8
+ movq %r14,1*8(%rsp)
+ CFI_REL_OFFSET r14,1*8
+ movq %r15,(%rsp)
+ CFI_REL_OFFSET r15,0*8
+ .endm
+
+ .macro RESTORE_REST
+ movq (%rsp),%r15
+ CFI_RESTORE r15
+ movq 1*8(%rsp),%r14
+ CFI_RESTORE r14
+ movq 2*8(%rsp),%r13
+ CFI_RESTORE r13
+ movq 3*8(%rsp),%r12
+ CFI_RESTORE r12
+ movq 4*8(%rsp),%rbp
+ CFI_RESTORE rbp
+ movq 5*8(%rsp),%rbx
+ CFI_RESTORE rbx
+ addq $REST_SKIP,%rsp
+ CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
+ .endm
+
+ .macro SAVE_ALL
+ SAVE_ARGS
+ SAVE_REST
+ .endm
+
+ .macro RESTORE_ALL addskip=0
+ RESTORE_REST
+ RESTORE_ARGS 0,\addskip
+ .endm
+
+ .macro icebp
+ .byte 0xf1
+ .endm
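+
+/*
+ * Frame layout sanity check (illustrative): after SAVE_ALL (SAVE_ARGS then
+ * SAVE_REST) the saved %rdi sits 8*8 bytes above the SAVE_ARGS area, which
+ * is 8*8 + REST_SKIP = 112 bytes from the final %rsp -- exactly the RDI
+ * offset defined at the top of this file. The same holds for the other
+ * registers, so entry code can index the saved frame with these constants.
+ */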
--- /dev/null
+/*
+ * cpufeature.h
+ *
+ * Defines x86 CPU feature bits
+ */
+
+#ifndef _X86_64_CPUFEATURE_H
+#define _X86_64_CPUFEATURE_H
+
+#define NCAPINTS 8 /* N 32-bit words worth of info */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
+#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */
+#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */
+#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */
+#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */
+#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */
+#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */
+#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */
+#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */
+#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */
+#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */
+#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */
+#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */
+#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */
+#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */
+#define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
+#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */
+#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
+#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
+#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */
+#define X86_FEATURE_DS (0*32+21) /* Debug Store */
+#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
+#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
+#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
+ /* of FPU context), and CR4.OSFXSR available */
+#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */
+#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */
+#define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */
+#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */
+#define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */
+#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */
+
+/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
+/* Don't duplicate feature flags which are redundant with Intel! */
+#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */
+#define X86_FEATURE_MP (1*32+19) /* MP Capable. */
+#define X86_FEATURE_NX (1*32+20) /* Execute Disable */
+#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */
+#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
+#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
+#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */
+
+/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
+#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */
+#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */
+#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */
+
+/* Other features, Linux-defined mapping, word 3 */
+/* This range is used for feature bits which conflict or are synthesized */
+#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */
+#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
+#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
+#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
+/* cpu types for specific tunings: */
+#define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */
+#define X86_FEATURE_K7 (3*32+ 5) /* Athlon */
+#define X86_FEATURE_P3 (3*32+ 6) /* P3 */
+#define X86_FEATURE_P4 (3*32+ 7) /* P4 */
+#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
+#define X86_FEATURE_UP (3*32+ 9) /* SMP kernel running on a uniprocessor */
+#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
+#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
+/* 14 free */
+#define X86_FEATURE_SYNC_RDTSC (3*32+15) /* RDTSC synchronizes the CPU */
+#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
+#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
+#define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */
+#define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */
+#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */
+#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_CID (4*32+10) /* Context ID */
+#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
+#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
+
+/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
+#define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */
+#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */
+#define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */
+#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */
+#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */
+#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */
+#define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */
+#define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */
+
+/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
+#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
+#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
+
+/*
+ * Auxiliary flags: Linux defined - For features scattered in various
+ * CPUID levels like 0x6, 0xA etc
+ */
+#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */
+
+#define cpu_has(c, bit) test_bit(bit, (c)->arch.x86_capability)
+#define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.arch.x86_capability)
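+
+/*
+ * For example (illustrative): X86_FEATURE_NX is (1*32+20), so
+ * boot_cpu_has(X86_FEATURE_NX) tests bit 20 of x86_capability word 1,
+ * which is filled from CPUID leaf 0x80000001 edx.
+ */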
+
+#define cpu_has_fpu 1
+#define cpu_has_vme 0
+#define cpu_has_de 1
+#define cpu_has_pse 1
+#define cpu_has_tsc 1
+#define cpu_has_pae ___BUG___
+#define cpu_has_pge 1
+#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC)
+#define cpu_has_mtrr 1
+#define cpu_has_mmx 1
+#define cpu_has_fxsr 1
+#define cpu_has_xmm 1
+#define cpu_has_xmm2 1
+#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
+#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
+#define cpu_has_mp 1 /* XXX */
+#define cpu_has_k6_mtrr 0
+#define cpu_has_cyrix_arr 0
+#define cpu_has_centaur_mcr 0
+#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
+
+#endif /* _X86_64_CPUFEATURE_H */
--- /dev/null
+/**
+ * Portions derived from Linux include/asm-x86_64/processor.h
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+#ifndef _X86_64_CPUINFO_H
+#define _X86_64_CPUINFO_H
+
+#include <arch/cpufeature.h>
+
+/**
+ * arch_cpuinfo.x86_cache_size first dimension indices
+ * and
+ * arch_cpuinfo.x86_tlb_size first dimension indices.
+ */
+#define INST 0 /* Instruction */
+#define DATA 1 /* Data */
+#define UNIF 2 /* Unified Instruction and Data */
+
+/**
+ * arch_cpuinfo.x86_cache_size second dimension indices.
+ * and
+ * arch_cpuinfo.x86_tlb_size second dimension indices.
+ */
+#define L1 0
+#define L2 1
+#define L3 2
+
+/**
+ * arch_cpuinfo.x86_tlb_size third dimension indices.
+ */
+#define PAGE_4KB 0
+#define PAGE_2MB 1
+#define PAGE_1GB 2
+
+/**
+ * Architecture specific CPU information and hardware bug flags.
+ * CPU info is kept separately for each CPU.
+ */
+struct arch_cpuinfo {
+ uint8_t x86_vendor; /* CPU vendor */
+ uint8_t x86_family; /* CPU family */
+ uint8_t x86_model; /* CPU model */
+ uint8_t x86_stepping; /* CPU stepping */
+ uint32_t x86_capability[NCAPINTS]; /* optional CPU features */
+ char x86_vendor_id[16]; /* Vendor ID string */
+ char x86_model_id[64]; /* Model/Brand ID string */
+ uint16_t x86_cache_size[3][3]; /* [I|D|U][LEVEL], in KB */
+ uint16_t x86_cache_line[3][3]; /* [I|D|U][LEVEL], in bytes */
+ int x86_clflush_size; /* In bytes */
+ uint16_t x86_tlb_size[3][2][3]; /* [I|D|U][LEVEL][PAGE_SIZE], in #entries */
+ uint8_t x86_virt_bits; /* Bits of virt address space */
+ uint8_t x86_phys_bits; /* Bits of phys address space */
+ uint8_t x86_pkg_cores; /* Number of cores in this CPU's package */
+ uint32_t x86_power; /* Power management features */
+ uint32_t cpuid_level; /* Max supported CPUID level */
+ uint32_t extended_cpuid_level; /* Max extended CPUID func supported */
+ uint32_t cur_cpu_khz; /* Current CPU freq. in KHz */
+ uint32_t max_cpu_khz; /* Maximum CPU freq. in KHz */
+ uint32_t min_cpu_khz; /* Minimum CPU freq. in KHz */
+ uint32_t tsc_khz; /* Time stamp counter freq. in KHz */
+ uint32_t lapic_khz; /* Local APIC bus freq. in KHz */
+ uint8_t apic_id; /* Local APIC ID, phys CPU ID */
+ uint8_t initial_lapic_id; /* As reported by CPU ID */
+};
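+
+/*
+ * Indexing sketch (illustrative): for a struct arch_cpuinfo *a,
+ *
+ *	a->x86_cache_size[DATA][L1]             L1 D-cache size in KB
+ *	a->x86_cache_line[INST][L1]             L1 I-cache line size in bytes
+ *	a->x86_tlb_size[DATA][L1][PAGE_2MB]     L1 D-TLB entries for 2MB pages
+ */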
+
+extern struct cpuinfo boot_cpu_data;
+
+struct cpuinfo;
+extern void print_arch_cpuinfo(struct cpuinfo *);
+extern void early_identify_cpu(struct cpuinfo *);
+
+#endif
--- /dev/null
+#ifndef _X86_64_CURRENT_H
+#define _X86_64_CURRENT_H
+
+#if !defined(__ASSEMBLY__)
+struct task_struct;
+
+#include <arch/pda.h>
+
+/**
+ * In normal operation, the current task pointer is read directly from
+ * the PDA.
+ *
+ * If the PDA has not been set up or is not available for some reason,
+ * the slower get_current_via_RSP() must be used instead. This is
+ * sometimes necessary during the bootstrap process.
+ */
+static inline struct task_struct *
+get_current(void)
+{
+ struct task_struct *t = read_pda(pcurrent);
+ return t;
+}
+#define current get_current()
+
+/**
+ * Derives the current task pointer from the current value of the
+ * stack pointer (RSP register).
+ *
+ * WARNING: Do not call this from interrupt context. It won't work.
+ * It is only safe to call this from task context.
+ */
+static inline struct task_struct *
+get_current_via_RSP(void)
+{
+ struct task_struct *tsk;
+ __asm__("andq %%rsp,%0; " : "=r" (tsk) : "0" (~(TASK_SIZE - 1)));
+ return tsk;
+}
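+
+/*
+ * The mask works because the task_struct and its kernel stack are assumed to
+ * share a single TASK_SIZE-aligned block, so clearing the low bits of %rsp
+ * yields the base of that block. Purely for illustration, with a TASK_SIZE
+ * of 8192 an %rsp of 0xffff810000123f10 masks down to 0xffff810000122000.
+ */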
+
+#else
+
+#ifndef ASM_OFFSET_H
+#include <arch/asm-offsets.h>
+#endif
+
+#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
+
+#endif
+
+#endif /* !(_X86_64_CURRENT_H) */
--- /dev/null
+#ifndef _X86_64_DEBUGREG_H
+#define _X86_64_DEBUGREG_H
+
+
+/* Indicate the register numbers for a number of the specific
+ debug registers. Registers 0-3 contain the addresses we wish to trap on */
+#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
+#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
+
+#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
+#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
+
+/* Define a few things for the status register. We can use this to determine
+ which debugging register was responsible for the trap. The other bits
+ are either reserved or not of interest to us. */
+
+#define DR_TRAP0 (0x1) /* db0 */
+#define DR_TRAP1 (0x2) /* db1 */
+#define DR_TRAP2 (0x4) /* db2 */
+#define DR_TRAP3 (0x8) /* db3 */
+
+#define DR_STEP (0x4000) /* single-step */
+#define DR_SWITCH (0x8000) /* task switch */
+
+/* Now define a bunch of things for manipulating the control register.
+ The top two bytes of the control register consist of 4 fields of 4
+ bits - each field corresponds to one of the four debug registers,
+ and indicates what types of access we trap on, and how large the data
+ field is that we are looking at */
+
+#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
+#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
+
+#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
+#define DR_RW_WRITE (0x1)
+#define DR_RW_READ (0x3)
+
+#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
+#define DR_LEN_2 (0x4)
+#define DR_LEN_4 (0xC)
+#define DR_LEN_8 (0x8)
+
+/* The low byte to the control register determine which registers are
+ enabled. There are 4 fields of two bits. One bit is "local", meaning
+ that the processor will reset the bit after a task switch and the other
+ is global meaning that we have to explicitly reset the bit. With linux,
+ you can use either one, since we explicitly zero the register when we enter
+ kernel mode. */
+
+#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
+#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
+#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
+
+#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
+#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
+
+/* The second byte to the control register has a few special things.
+ We can slow the instruction pipeline for instructions coming via the
+ gdt or the ldt if we want to. I am not sure why this is an advantage */
+
+#define DR_CONTROL_RESERVED (0xFFFFFFFF0000FC00UL) /* Reserved */
+#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
+#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
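+
+/*
+ * Example (illustrative): to program debug register 0 as a 4-byte write
+ * watchpoint with its local-enable bit set, the control value would be
+ *
+ *	dr7 = ((DR_RW_WRITE | DR_LEN_4) << DR_CONTROL_SHIFT)
+ *	    | (1 << DR_LOCAL_ENABLE_SHIFT);
+ *
+ * i.e. 0xD0001. Breakpoint N shifts the type/length field by an extra
+ * N * DR_CONTROL_SIZE bits and the enable bit by N * DR_ENABLE_SIZE bits.
+ */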
+
+#endif
--- /dev/null
+#ifndef _X86_64_DELAY_H
+#define _X86_64_DELAY_H
+
+/*
+ * Copyright (C) 1993 Linus Torvalds
+ *
+ * Delay routines calling functions in arch/x86_64/lib/delay.c
+ */
+
+extern void __bad_udelay(void);
+extern void __bad_ndelay(void);
+
+extern void __udelay(unsigned long usecs);
+extern void __ndelay(unsigned long nsecs);
+extern void __const_udelay(unsigned long usecs);
+extern void __delay(unsigned long loops);
+
+#define udelay(n) (__builtin_constant_p(n) ? \
+ ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \
+ __udelay(n))
+
+#define ndelay(n) (__builtin_constant_p(n) ? \
+ ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
+ __ndelay(n))
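+
+/*
+ * The constant 0x10c6 above is 2^32 / 1000000 truncated to an integer (4294),
+ * so __const_udelay() receives the delay expressed as a 32-bit binary
+ * fraction of a second. For example (illustrative), udelay(10) expands to
+ * __const_udelay(10 * 0x10c6) when the argument is a compile-time constant
+ * no larger than 20000.
+ */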
+
+
+#endif /* defined(_X86_64_DELAY_H) */
--- /dev/null
+/* Written 2000 by Andi Kleen */
+#ifndef _ARCH_DESC_H
+#define _ARCH_DESC_H
+
+#include <arch/ldt.h>
+
+#ifndef __ASSEMBLY__
+
+#include <lwk/string.h>
+#include <lwk/smp.h>
+
+#include <arch/segment.h>
+#include <arch/mmu.h>
+
+// 8 byte segment descriptor
+struct desc_struct {
+ u16 limit0;
+ u16 base0;
+ unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
+ unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
+} __attribute__((packed));
+
+struct n_desc_struct {
+ unsigned int a,b;
+};
+
+extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
+
+enum {
+ GATE_INTERRUPT = 0xE,
+ GATE_TRAP = 0xF,
+ GATE_CALL = 0xC,
+};
+
+/**
+ * Long-Mode Gate Descriptor (16-bytes)
+ */
+struct gate_struct {
+ uint16_t offset_low; /* [15-0] of target code segment offset */
+ uint16_t segment; /* Target code segment selector */
+ unsigned ist : 3; /* Interrupt-Stack-Table index into TSS */
+ unsigned zero0 : 5;
+ unsigned type : 5; /* Gate descriptor type */
+ unsigned dpl : 2; /* Privilege level */
+ unsigned p : 1; /* Present bit... in use? */
+ uint16_t offset_middle; /* [31-16] of target code segment offset */
+ uint32_t offset_high; /* [63-32] of target code segment offset */
+ uint32_t zero1;
+} __attribute__((packed));
+
+#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
+#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
+#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
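+
+/*
+ * For example (illustrative): for a handler at 0xffffffff80123456,
+ * PTR_LOW() yields 0x3456, PTR_MIDDLE() yields 0x8012 and PTR_HIGH()
+ * yields 0xffffffff, matching the three offset fields of struct gate_struct.
+ */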
+
+enum {
+ DESC_TSS = 0x9,
+ DESC_LDT = 0x2,
+};
+
+// LDT or TSS descriptor in the GDT. 16 bytes.
+struct ldttss_desc {
+ u16 limit0;
+ u16 base0;
+ unsigned base1 : 8, type : 5, dpl : 2, p : 1;
+ unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
+ u32 base3;
+ u32 zero1;
+} __attribute__((packed));
+
+struct desc_ptr {
+ unsigned short size;
+ unsigned long address;
+} __attribute__((packed)) ;
+
+#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
+#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
+#define clear_LDT() asm volatile("lldt %w0"::"r" (0))
+
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt[];
+extern struct gate_struct idt_table[];
+extern struct desc_ptr cpu_gdt_descr[];
+
+/* the cpu gdt accessor */
+#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
+
+/**
+ * Installs a Long-Mode gate descriptor.
+ */
+static inline void
+_set_gate(
+ void * adr, /* Address to install gate descriptor at */
+ unsigned type, /* Type of gate */
+ unsigned long func, /* The handler function for the gate */
+ unsigned dpl, /* Privilege level */
+ unsigned ist /* Interrupt-Stack-Table index */
+)
+{
+ struct gate_struct s;
+ s.offset_low = PTR_LOW(func);
+ s.segment = __KERNEL_CS;
+ s.ist = ist;
+ s.p = 1;
+ s.dpl = dpl;
+ s.zero0 = 0;
+ s.zero1 = 0;
+ s.type = type;
+ s.offset_middle = PTR_MIDDLE(func);
+ s.offset_high = PTR_HIGH(func);
+ /* does not need to be atomic because it is only done once at setup time */
+ memcpy(adr, &s, 16);
+}
+
+/**
+ * Installs an interrupt gate.
+ * The interrupt will execute on the normal kernel stack.
+ */
+static inline void
+set_intr_gate(int nr, void *func)
+{
+ BUG_ON((unsigned)nr > 0xFF);
+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
+}
+
+/**
+ * Installs an interrupt gate.
+ * The interrupt will execute on the stack specified by the 'ist' argument.
+ */
+static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
+{
+ BUG_ON((unsigned)nr > 0xFF);
+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
+}
+
+/**
+ * Installs a system interrupt gate.
+ * The privilege level is set to 3, meaning that user-mode can trigger it.
+ */
+static inline void set_system_gate(int nr, void *func)
+{
+ BUG_ON((unsigned)nr > 0xFF);
+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
+}
+
+/**
+ * Installs a system interrupt gate.
+ * The privilege level is set to 3, meaning that user-mode can trigger it.
+ * The interrupt will execute on the stack specified by the 'ist' argument.
+ */
+static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
+{
+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
+}
+
+static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
+ unsigned size)
+{
+ struct ldttss_desc d;
+ memset(&d,0,sizeof(d));
+ d.limit0 = size & 0xFFFF;
+ d.base0 = PTR_LOW(tss);
+ d.base1 = PTR_MIDDLE(tss) & 0xFF;
+ d.type = type;
+ d.p = 1;
+ d.limit1 = (size >> 16) & 0xF;
+ d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF;
+ d.base3 = PTR_HIGH(tss);
+ memcpy(ptr, &d, 16);
+}
+
+static inline void set_tss_desc(unsigned cpu, void *addr)
+{
+ /*
+ * sizeof(unsigned long) coming from an extra "long" at the end
+ * of the iobitmap. See tss_struct definition in processor.h
+ *
+ * -1? seg base+limit should be pointing to the address of the
+ * last valid byte
+ */
+ set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS],
+ (unsigned long)addr, DESC_TSS,
+ IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
+}
+
+static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
+{
+ set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr,
+ DESC_LDT, size * 8 - 1);
+}
+
+static inline void set_seg_base(unsigned cpu, int entry, void *base)
+{
+ struct desc_struct *d = &cpu_gdt(cpu)[entry];
+ u32 addr = (u32)(u64)base;
+ BUG_ON((u64)base >> 32);
+ d->base0 = addr & 0xffff;
+ d->base1 = (addr >> 16) & 0xff;
+ d->base2 = (addr >> 24) & 0xff;
+}
+
+#define LDT_entry_a(info) \
+ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
+/* Don't allow setting of the lm bit. It is useless anyway because
+ 64bit system calls require __USER_CS. */
+#define LDT_entry_b(info) \
+ (((info)->base_addr & 0xff000000) | \
+ (((info)->base_addr & 0x00ff0000) >> 16) | \
+ ((info)->limit & 0xf0000) | \
+ (((info)->read_exec_only ^ 1) << 9) | \
+ ((info)->contents << 10) | \
+ (((info)->seg_not_present ^ 1) << 15) | \
+ ((info)->seg_32bit << 22) | \
+ ((info)->limit_in_pages << 23) | \
+ ((info)->useable << 20) | \
+ /* ((info)->lm << 21) | */ \
+ 0x7000)
+
+#define LDT_empty(info) (\
+ (info)->base_addr == 0 && \
+ (info)->limit == 0 && \
+ (info)->contents == 0 && \
+ (info)->read_exec_only == 1 && \
+ (info)->seg_32bit == 0 && \
+ (info)->limit_in_pages == 0 && \
+ (info)->seg_not_present == 1 && \
+ (info)->useable == 0 && \
+ (info)->lm == 0)
+
+#if TLS_SIZE != 24
+# error update this code.
+#endif
+
+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
+{
+ u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
+ gdt[0] = t->tls_array[0];
+ gdt[1] = t->tls_array[1];
+ gdt[2] = t->tls_array[2];
+}
+
+/*
+ * load one particular LDT into the current CPU
+ */
+static inline void load_LDT_nolock (mm_context_t *pc, int cpu)
+{
+ int count = pc->size;
+
+ if (likely(!count)) {
+ clear_LDT();
+ return;
+ }
+
+ set_ldt_desc(cpu, pc->ldt, count);
+ load_LDT_desc();
+}
+
+static inline void load_LDT(mm_context_t *pc)
+{
+ int cpu = get_cpu();
+ load_LDT_nolock(pc, cpu);
+ put_cpu();
+}
+
+extern struct desc_ptr idt_descr;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
--- /dev/null
+#include <arch-generic/div64.h>
--- /dev/null
+#ifndef _DWARF2_H
+#define _DWARF2_H 1
+
+
+#ifndef __ASSEMBLY__
+#warning "asm/dwarf2.h should be only included in pure assembly files"
+#endif
+
+/*
+ Macros for dwarf2 CFI unwind table entries.
+ See "as.info" for details on these pseudo ops. Unfortunately
+ they are only supported in very new binutils, so define them
+ away for older versions.
+ */
+
+#ifdef CONFIG_UNWIND_INFO
+
+#define CFI_STARTPROC .cfi_startproc
+#define CFI_ENDPROC .cfi_endproc
+#define CFI_DEF_CFA .cfi_def_cfa
+#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
+#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
+#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
+#define CFI_OFFSET .cfi_offset
+#define CFI_REL_OFFSET .cfi_rel_offset
+#define CFI_REGISTER .cfi_register
+#define CFI_RESTORE .cfi_restore
+#define CFI_REMEMBER_STATE .cfi_remember_state
+#define CFI_RESTORE_STATE .cfi_restore_state
+#define CFI_UNDEFINED .cfi_undefined
+
+#else
+
+/* use assembler line comment character # to ignore the arguments. */
+#define CFI_STARTPROC #
+#define CFI_ENDPROC #
+#define CFI_DEF_CFA #
+#define CFI_DEF_CFA_REGISTER #
+#define CFI_DEF_CFA_OFFSET #
+#define CFI_ADJUST_CFA_OFFSET #
+#define CFI_OFFSET #
+#define CFI_REL_OFFSET #
+#define CFI_REGISTER #
+#define CFI_RESTORE #
+#define CFI_REMEMBER_STATE #
+#define CFI_RESTORE_STATE #
+#define CFI_UNDEFINED #
+
+#endif
+
+#endif
--- /dev/null
+/*
+ * structures and definitions for the int 15, ax=e820 memory map
+ * scheme.
+ *
+ * In a nutshell, setup.S populates a scratch table in the
+ * empty_zero_block that contains a list of usable address/size
+ * pairs. In setup.c, this information is transferred into the e820map,
+ * and in init.c/numa.c, that new information is used to mark pages
+ * reserved or not.
+ */
+#ifndef _ARCH_E820_H
+#define _ARCH_E820_H
+
+#include <lwk/init.h>
+
+#define E820MAP 0x2d0 /* our map */
+#define E820MAX 128 /* number of entries in E820MAP */
+#define E820NR 0x1e8 /* # entries in E820MAP */
+
+#define E820_RAM 1
+#define E820_RESERVED 2
+#define E820_ACPI 3 /* usable as RAM once ACPI tables have been read */
+#define E820_NVS 4
+
+#define HIGH_MEMORY (1024*1024)
+
+#define LOWMEMSIZE() (0x9f000)
+
+#ifndef __ASSEMBLY__
+struct e820entry {
+ u64 addr; /* start of memory segment */
+ u64 size; /* size of memory segment */
+ u32 type; /* type of memory segment */
+} __attribute__((packed));
+
+struct e820map {
+ int nr_map;
+ struct e820entry map[E820MAX];
+};
+
+extern unsigned long find_e820_area(unsigned long start, unsigned long end,
+ unsigned size);
+extern void add_memory_region(unsigned long start, unsigned long size,
+ int type);
+extern void setup_memory_region(void);
+extern void contig_e820_setup(void);
+extern unsigned long e820_end_of_ram(void);
+extern void e820_reserve_resources(void);
+extern void e820_print_map(char *who);
+extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
+extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
+
+extern void e820_bootmem_free(unsigned long start,unsigned long end);
+extern void e820_setup_gap(void);
+extern unsigned long e820_hole_size(unsigned long start_pfn,
+ unsigned long end_pfn);
+
+extern void __init parse_memopt(char *p, char **end);
+extern void __init parse_memmapopt(char *p, char **end);
+
+extern struct e820map e820;
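+
+/*
+ * Usage sketch (illustrative only): walking the parsed map to total up
+ * usable RAM.
+ *
+ *	u64 ram_bytes = 0;
+ *	int i;
+ *	for (i = 0; i < e820.nr_map; i++)
+ *		if (e820.map[i].type == E820_RAM)
+ *			ram_bytes += e820.map[i].size;
+ */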
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ARCH_E820_H */
--- /dev/null
+#ifndef __ASM_X86_64_ELF_H
+#define __ASM_X86_64_ELF_H
+
+/*
+ * ELF register definitions..
+ */
+
+#include <arch/ptrace.h>
+#include <arch/user.h>
+
+/* x86-64 relocation types */
+#define R_X86_64_NONE 0 /* No reloc */
+#define R_X86_64_64 1 /* Direct 64 bit */
+#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
+#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
+#define R_X86_64_PLT32 4 /* 32 bit PLT address */
+#define R_X86_64_COPY 5 /* Copy symbol at runtime */
+#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
+#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
+#define R_X86_64_RELATIVE 8 /* Adjust by program base */
+#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
+ offset to GOT */
+#define R_X86_64_32 10 /* Direct 32 bit zero extended */
+#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
+#define R_X86_64_16 12 /* Direct 16 bit zero extended */
+#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
+#define R_X86_64_8 14 /* Direct 8 bit sign extended */
+#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
+
+#define R_X86_64_NUM 16
+
+typedef unsigned long elf_greg_t;
+
+#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct user_i387_struct elf_fpregset_t;
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS64
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_X86_64
+
+/* This yields a string that ld.so will use to load implementation
+ specific libraries for optimization. This is more specific in
+ intent than poking at uname or /proc/cpuinfo.
+
+ For the moment, we have only optimizations for the Intel generations,
+ but that could change... */
+
+/* I'm not sure if we can use '-' here */
+#define ELF_PLATFORM ("x86_64")
+
+#define ELF_EXEC_PAGESIZE 4096
+
+#ifdef __KERNEL__
+#include <arch/processor.h>
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) \
+ ((x)->e_machine == EM_X86_64)
+
+
+/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
+ contains a pointer to a function which might be registered using `atexit'.
+ This provides a means for the dynamic linker to call DT_FINI functions for
+ shared libraries that have been loaded before the code runs.
+
+ A value of 0 tells us we have no such handler.
+
+ We might as well make sure everything else is cleared too (except for %esp),
+ just to make things more deterministic.
+ */
+#define ELF_PLAT_INIT(_r, load_addr) do { \
+ struct task_struct *cur = current; \
+ (_r)->rbx = 0; (_r)->rcx = 0; (_r)->rdx = 0; \
+ (_r)->rsi = 0; (_r)->rdi = 0; (_r)->rbp = 0; \
+ (_r)->rax = 0; \
+ (_r)->r8 = 0; \
+ (_r)->r9 = 0; \
+ (_r)->r10 = 0; \
+ (_r)->r11 = 0; \
+ (_r)->r12 = 0; \
+ (_r)->r13 = 0; \
+ (_r)->r14 = 0; \
+ (_r)->r15 = 0; \
+ cur->thread.fs = 0; cur->thread.gs = 0; \
+ cur->thread.fsindex = 0; cur->thread.gsindex = 0; \
+ cur->thread.ds = 0; cur->thread.es = 0; \
+ clear_thread_flag(TIF_IA32); \
+} while (0)
+
+#define USE_ELF_CORE_DUMP
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
+/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
+ now struct_user_regs, they are different). Assumes current is the process
+ getting dumped. */
+
+#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
+ unsigned v; \
+ (pr_reg)[0] = (regs)->r15; \
+ (pr_reg)[1] = (regs)->r14; \
+ (pr_reg)[2] = (regs)->r13; \
+ (pr_reg)[3] = (regs)->r12; \
+ (pr_reg)[4] = (regs)->rbp; \
+ (pr_reg)[5] = (regs)->rbx; \
+ (pr_reg)[6] = (regs)->r11; \
+ (pr_reg)[7] = (regs)->r10; \
+ (pr_reg)[8] = (regs)->r9; \
+ (pr_reg)[9] = (regs)->r8; \
+ (pr_reg)[10] = (regs)->rax; \
+ (pr_reg)[11] = (regs)->rcx; \
+ (pr_reg)[12] = (regs)->rdx; \
+ (pr_reg)[13] = (regs)->rsi; \
+ (pr_reg)[14] = (regs)->rdi; \
+ (pr_reg)[15] = (regs)->orig_rax; \
+ (pr_reg)[16] = (regs)->rip; \
+ (pr_reg)[17] = (regs)->cs; \
+ (pr_reg)[18] = (regs)->eflags; \
+ (pr_reg)[19] = (regs)->rsp; \
+ (pr_reg)[20] = (regs)->ss; \
+ (pr_reg)[21] = current->thread.fs; \
+ (pr_reg)[22] = current->thread.gs; \
+ asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
+ asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
+ asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
+ asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \
+} while(0);
+
+/* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+
+#define ELF_HWCAP(cpu) (cpu_info[cpu].arch.x86_capability[0])
+
+extern void set_personality_64bit(void);
+#define SET_PERSONALITY(ex, ibcs2) set_personality_64bit()
+/*
+ * An executable for which elf_read_implies_exec() returns TRUE will
+ * have the READ_IMPLIES_EXEC personality flag set automatically.
+ */
+#define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X)
+
+struct task_struct;
+
+extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
+extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
+
+#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
+#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
+
+/* 1GB for 64bit, 8MB for 32bit */
+#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
+
+#endif
+
+#endif
--- /dev/null
+#ifndef _X86_64_ERRNO_H
+#define _X86_64_ERRNO_H
+
+#include <arch-generic/errno.h>
+
+#endif
--- /dev/null
+#ifndef _ASM_X86_64_EXTABLE_H
+#define _ASM_X86_64_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * The nice thing about this mechanism is that the fixup code is completely
+ * out of line with the main instruction path. This means when everything
+ * is well, we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+struct exception_table_entry
+{
+ unsigned long insn; /* Instruction addr that is allowed to fault */
+ unsigned long fixup; /* Fixup handler address */
+};
+
+#define ARCH_HAS_SEARCH_EXTABLE
+
+#endif
--- /dev/null
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <lwk/kernel.h>
+#include <arch/apicdef.h>
+#include <arch/page.h>
+#include <arch/vsyscall.h>
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process.
+ *
+ * These 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * higher than 1). Use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+
+enum fixed_addresses {
+ VSYSCALL_LAST_PAGE,
+ VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
+ FIX_APIC_BASE, /* local (CPU) APIC */
+ FIX_IO_APIC_BASE_0,
+ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
+ __end_of_fixed_addresses
+};
+
+extern void __set_fixmap(enum fixed_addresses fixmap_index,
+ unsigned long phys_addr, pgprot_t prot);
+
+#define set_fixmap(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL)
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+
+#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
+#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
+#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
+
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+
+extern void __this_fixmap_does_not_exist(void);
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+ /*
+ * this branch gets completely eliminated after inlining,
+ * except when someone tries to use fixaddr indices in an
+ * illegal way. (such as mixing up address types or using
+ * out-of-range indices).
+ *
+ * If it doesn't get removed, the linker will complain
+ * loudly with a reasonably clear error message..
+ */
+ if (idx >= __end_of_fixed_addresses)
+ __this_fixmap_does_not_exist();
+
+ return __fix_to_virt(idx);
+}
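+
+/*
+ * For example (illustrative): fix_to_virt(FIX_APIC_BASE) evaluates to
+ * FIXADDR_TOP - (FIX_APIC_BASE << PAGE_SHIFT); once set_fixmap_nocache()
+ * has bound that slot to the local APIC's physical base, the constant
+ * virtual address can be used for APIC register accesses.
+ */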
+
+#endif
--- /dev/null
+/*
+ * include/asm-x86_64/i387.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * General FPU state handling cleanups
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ * x86-64 work by Andi Kleen 2002
+ */
+
+#ifndef _X86_64_I387_H
+#define _X86_64_I387_H
+
+#include <lwk/task.h>
+#include <lwk/errno.h>
+#include <arch/processor.h>
+#include <arch/sigcontext.h>
+#include <arch/user.h>
+#include <arch/uaccess.h>
+
+extern void fpu_init(void);
+extern unsigned int mxcsr_feature_mask;
+extern void mxcsr_feature_mask_init(void);
+
+/* Ignore delayed exceptions from user space */
+static inline void tolerant_fwait(void)
+{
+ asm volatile("1: fwait\n"
+ "2:\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 8\n"
+ " .quad 1b,2b\n"
+ " .previous\n");
+}
+
+#define clear_fpu(tsk) do { \
+ if (tsk->arch.status & TS_USEDFPU) { \
+ tolerant_fwait(); \
+ tsk->arch.status &= ~TS_USEDFPU; \
+ stts(); \
+ } \
+} while (0)
+
+/*
+ * ptrace request handlers...
+ */
+extern int get_fpregs(struct user_i387_struct __user *buf,
+ struct task_struct *tsk);
+extern int set_fpregs(struct task_struct *tsk,
+ struct user_i387_struct __user *buf);
+
+/*
+ * i387 state interaction
+ */
+#define get_fpu_mxcsr(t) ((t)->arch.i387.fxsave.mxcsr)
+#define get_fpu_cwd(t) ((t)->arch.i387.fxsave.cwd)
+#define get_fpu_fxsr_twd(t) ((t)->arch.i387.fxsave.twd)
+#define get_fpu_swd(t) ((t)->arch.i387.fxsave.swd)
+#define set_fpu_cwd(t,val) ((t)->arch.i387.fxsave.cwd = (val))
+#define set_fpu_swd(t,val) ((t)->arch.i387.fxsave.swd = (val))
+#define set_fpu_fxsr_twd(t,val) ((t)->arch.i387.fxsave.twd = (val))
+
+#define X87_FSW_ES (1 << 7) /* Exception Summary */
+
+/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
+ is pending. Clear the x87 state here by setting it to fixed
+ values. The kernel data segment can be sometimes 0 and sometimes
+ new user value. Both should be ok.
+ Use the PDA as safe address because it should be already in L1. */
+static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
+{
+ if (unlikely(fx->swd & X87_FSW_ES))
+ asm volatile("fnclex");
+
+ /*
+ * Unconditional fix for AMD CPUs that don't save/restore FDP/FIP/FOP.
+ * TODO: some CPUs may not need this, possibly use Linux
+ * alternative_input() mechanism.
+ */
+ asm volatile ("emms"); /* clear stack tags */
+ asm volatile ("fildl %gs:0"); /* load to clear state */
+}
+
+static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
+{
+ int err;
+
+ asm volatile("1: rex64/fxsave (%[fx])\n\t"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl $-1,%[err]\n"
+ " jmp 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 8\n"
+ " .quad 1b,3b\n"
+ ".previous"
+ : [err] "=r" (err), "=m" (*fx)
+#if 0 /* See comment in __fxsave_clear() below. */
+ : [fx] "r" (fx), "0" (0));
+#else
+ : [fx] "cdaSDb" (fx), "0" (0));
+#endif
+ if (unlikely(err) && __clear_user(fx, sizeof(struct i387_fxsave_struct)))
+ err = -EFAULT;
+ /* No need to clear here because the caller clears USED_MATH */
+ return err;
+}
+
+static inline void __fxsave_clear(struct task_struct *tsk)
+{
+ /* Using "rex64; fxsave %0" is broken because, if the memory operand
+ uses any extended registers for addressing, a second REX prefix
+ will be generated (to the assembler, rex64 followed by semicolon
+ is a separate instruction), and hence the 64-bitness is lost. */
+#if 0
+ /* Using "fxsaveq %0" would be the ideal choice, but is only supported
+ starting with gas 2.16. */
+ __asm__ __volatile__("fxsaveq %0"
+ : "=m" (tsk->arch.thread.i387.fxsave));
+#elif 0
+ /* Using, as a workaround, the properly prefixed form below isn't
+ accepted by any binutils version so far released, complaining that
+ the same type of prefix is used twice if an extended register is
+ needed for addressing (fix submitted to mainline 2005-11-21). */
+ __asm__ __volatile__("rex64/fxsave %0"
+ : "=m" (tsk->arch.thread.i387.fxsave));
+#else
+ /* This, however, we can work around by forcing the compiler to select
+ an addressing mode that doesn't require extended registers. */
+ __asm__ __volatile__("rex64/fxsave %P2(%1)"
+ : "=m" (tsk->arch.thread.i387.fxsave)
+ : "cdaSDb" (tsk),
+ "i" (offsetof(__typeof__(*tsk),
+ arch.thread.i387.fxsave)));
+#endif
+ clear_fpu_state(&tsk->arch.thread.i387.fxsave);
+}
+
+static inline void kernel_fpu_begin(void)
+{
+ if (current->arch.flags & TF_USED_FPU) {
+ __fxsave_clear(current);
+ return;
+ }
+ clts();
+}
+
+static inline void kernel_fpu_end(void)
+{
+ stts();
+}
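+
+/*
+ * Usage sketch (illustrative): kernel code that wants to use SSE or x87
+ * instructions brackets the region so any lazily-held user FPU state is
+ * written out first and TS is restored afterwards.
+ *
+ *	kernel_fpu_begin();
+ *	... SSE or x87 instructions ...
+ *	kernel_fpu_end();
+ */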
+
+static inline void
+fpu_save_state(struct task_struct *task)
+{
+ __asm__ __volatile__("fxsaveq %0"
+ : "=m" (task->arch.thread.i387.fxsave));
+ clear_fpu_state(&task->arch.thread.i387.fxsave);
+}
+
+static inline void
+fpu_restore_state(struct task_struct *task)
+{
+ __asm__ __volatile__("fxrstorq %0"
+ ::"m" (task->arch.thread.i387.fxsave));
+}
+
+#endif /* _X86_64_I387_H */
--- /dev/null
+#ifndef _ARCH_X86_64_IDT_VECTORS_H
+#define _ARCH_X86_64_IDT_VECTORS_H
+
+/*
+ * Based on linux/include/asm-x86_64/hw_irq.h
+ * Original file header:
+ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
+ * moved some of the old arch/i386/kernel/irq.h to here. VY
+ * IRQ/IPI changes taken from work by Thomas Radke
+ * <tomsoft@informatik.tu-chemnitz.de>
+ * hacked by Andi Kleen for x86-64.
+ */
+
+/**
+ * This file defines symbolic names for interrupt vectors installed in the
+ * Interrupt Descriptor Table (IDT). The IDT contains entries for 256 interrupt
+ * vectors. Vectors [0,31] are used for specific purposes defined by the x86_64
+ * architecture. Vectors [32,255] are available for external interrupts. The
+ * LWK uses a number of interrupt vectors for its own internal purposes, e.g.,
+ * inter-processor interrupts for TLB invalidations.
+ */
+
+/*
+ * [0,31] Standard x86_64 architecture vectors
+ */
+#define DIVIDE_ERROR_VECTOR 0
+#define DEBUG_VECTOR 1
+#define NMI_VECTOR 2
+#define INT3_VECTOR 3
+#define OVERFLOW_VECTOR 4
+#define BOUNDS_VECTOR 5
+#define INVALID_OP_VECTOR 6
+#define DEVICE_NOT_AVAILABLE_VECTOR 7
+#define DOUBLE_FAULT_VECTOR 8
+#define COPROC_SEGMENT_OVERRUN_VECTOR 9
+#define INVALID_TSS_VECTOR 10
+#define SEGMENT_NOT_PRESENT_VECTOR 11
+#define STACK_SEGMENT_VECTOR 12
+#define GENERAL_PROTECTION_VECTOR 13
+#define PAGE_FAULT_VECTOR 14
+#define SPURIOUS_INTERRUPT_BUG_VECTOR 15
+#define COPROCESSOR_ERROR_VECTOR 16
+#define ALIGNMENT_CHECK_VECTOR 17
+#define MACHINE_CHECK_VECTOR 18
+#define SIMD_COPROCESSOR_ERROR_VECTOR 19
+/*
+ * [20,31] Reserved by x86_64 architecture for future use
+ * [32,47] Free for use by devices
+ * [48,63] Standard ISA IRQs
+ */
+#define IRQ0_VECTOR 48
+#define IRQ1_VECTOR 49
+#define IRQ2_VECTOR 50
+#define IRQ3_VECTOR 51
+#define IRQ4_VECTOR 52
+#define IRQ5_VECTOR 53
+#define IRQ6_VECTOR 54
+#define IRQ7_VECTOR 55
+#define IRQ8_VECTOR 56
+#define IRQ9_VECTOR 57
+#define IRQ10_VECTOR 58
+#define IRQ11_VECTOR 59
+#define IRQ12_VECTOR 60
+#define IRQ13_VECTOR 61
+#define IRQ14_VECTOR 62
+#define IRQ15_VECTOR 63
+/*
+ * [64,238] Free for use by devices
+ * [239,255] Used by LWK for various internal purposes
+ */
+#define APIC_TIMER_VECTOR 239
+#define INVALIDATE_TLB_0_VECTOR 240
+#define INVALIDATE_TLB_1_VECTOR 241
+#define INVALIDATE_TLB_2_VECTOR 242
+#define INVALIDATE_TLB_3_VECTOR 243
+#define INVALIDATE_TLB_4_VECTOR 244
+#define INVALIDATE_TLB_5_VECTOR 245
+#define INVALIDATE_TLB_6_VECTOR 246
+#define INVALIDATE_TLB_7_VECTOR 247
+/* 248 is available */
+#define APIC_PERF_COUNTER_VECTOR 249
+#define APIC_THERMAL_VECTOR 250
+/* 251 is available */
+#define XCALL_FUNCTION_VECTOR 252
+#define XCALL_RESCHEDULE_VECTOR 253
+#define APIC_ERROR_VECTOR 254
+#define APIC_SPURIOUS_VECTOR 255
+
+/**
+ * Meta-defines describing the interrupt vector space defined above.
+ */
+#define NUM_IDT_ENTRIES 256
+#define FIRST_EXTERNAL_VECTOR 32
+#define FIRST_SYSTEM_VECTOR 239
+#define INVALIDATE_TLB_VECTOR_START 240
+#define INVALIDATE_TLB_VECTOR_END 247
+#define NUM_INVALIDATE_TLB_VECTORS 8
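+
+/*
+ * For example (illustrative): legacy ISA IRQ n (0-15) is routed through
+ * vector IRQ0_VECTOR + n, so the timer on IRQ 0 arrives on vector 48 and
+ * the RTC on IRQ 8 arrives on vector 56.
+ */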
+
+#endif
--- /dev/null
+#ifndef _ARCH_IO_H
+#define _ARCH_IO_H
+
+
+/*
+ * This file contains the definitions for the x86 IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated
+ * to (a) handle it all in a way that makes gcc able to optimize it
+ * as well as possible and (b) to avoid writing the same thing
+ * over and over again with slight variations and possibly making a
+ * mistake somewhere.
+ */
+
+/*
+ * Thanks to James van Artsdalen for a better timing-fix than
+ * the two short jumps: using outb's to a nonexistent port seems
+ * to guarantee better timings even on fast machines.
+ *
+ * On the other hand, I'd like to be sure of a non-existent port:
+ * I feel a bit unsafe about using 0x80 (should be safe, though)
+ *
+ * Linus
+ */
+
+ /*
+ * Bit simplified and optimized by Jan Hubicka
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
+ *
+ * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
+ * isa_read[wl] and isa_write[wl] fixed
+ * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ */
+
+#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
+
+#ifdef REALLY_SLOW_IO
+#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
+#else
+#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
+#endif
+
+/*
+ * Talk about misusing macros..
+ */
+#define __OUT1(s,x) \
+static inline void out##s(unsigned x value, unsigned short port) {
+
+#define __OUT2(s,s1,s2) \
+__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
+
+#define __OUT(s,s1,x) \
+__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
+__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
+
+#define __IN1(s) \
+static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
+
+#define __IN2(s,s1,s2) \
+__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
+
+#define __IN(s,s1,i...) \
+__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
+__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
+
+#define __INS(s) \
+static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
+{ __asm__ __volatile__ ("rep ; ins" #s \
+: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+
+#define __OUTS(s) \
+static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
+{ __asm__ __volatile__ ("rep ; outs" #s \
+: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+
+#define RETURN_TYPE unsigned char
+__IN(b,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned short
+__IN(w,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned int
+__IN(l,"")
+#undef RETURN_TYPE
+
+__OUT(b,"b",char)
+__OUT(w,"w",short)
+__OUT(l,,int)
+
+__INS(b)
+__INS(w)
+__INS(l)
+
+__OUTS(b)
+__OUTS(w)
+__OUTS(l)
+
+#define IO_SPACE_LIMIT 0xffff
+
+#if defined(__KERNEL__) && __x86_64__
+
+// #include <linux/vmalloc.h>
+
+#ifndef __i386__
+/*
+ * Change virtual addresses to physical addresses and vv.
+ * These are pretty trivial
+ */
+static inline unsigned long virt_to_phys(volatile void * address)
+{
+ return __pa(address);
+}
+
+static inline void * phys_to_virt(unsigned long address)
+{
+ return __va(address);
+}
+#endif
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+
+#include <arch-generic/iomap.h>
+
+extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+
+static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
+{
+ return __ioremap(offset, size, 0);
+}
+
+extern void *early_ioremap(unsigned long addr, unsigned long size);
+extern void early_iounmap(void *addr, unsigned long size);
+
+/*
+ * This one maps high address device memory and turns off caching for that area.
+ * it's useful if some control registers are in such an area and write combining
+ * or read caching is not desirable:
+ */
+extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
+extern void iounmap(volatile void __iomem *addr);
+
+/*
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
+ */
+#define isa_virt_to_bus virt_to_phys
+#define isa_page_to_bus page_to_phys
+#define isa_bus_to_virt phys_to_virt
+
+/*
+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
+ * are forbidden in portable PCI drivers.
+ *
+ * Allow them on x86 for legacy drivers, though.
+ */
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+/*
+ * readX/writeX() are used to access memory mapped devices. On some
+ * architectures the memory mapped IO stuff needs to be accessed
+ * differently. On the x86 architecture, we just read/write the
+ * memory location directly.
+ */
+
+static inline __u8 __readb(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u8 *)addr;
+}
+static inline __u16 __readw(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u16 *)addr;
+}
+static __always_inline __u32 __readl(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u32 *)addr;
+}
+static inline __u64 __readq(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u64 *)addr;
+}
+#define readb(x) __readb(x)
+#define readw(x) __readw(x)
+#define readl(x) __readl(x)
+#define readq(x) __readq(x)
+#define readb_relaxed(a) readb(a)
+#define readw_relaxed(a) readw(a)
+#define readl_relaxed(a) readl(a)
+#define readq_relaxed(a) readq(a)
+#define __raw_readb readb
+#define __raw_readw readw
+#define __raw_readl readl
+#define __raw_readq readq
+
+#define mmiowb()
+
+static inline void __writel(__u32 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u32 *)addr = b;
+}
+static inline void __writeq(__u64 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u64 *)addr = b;
+}
+static inline void __writeb(__u8 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u8 *)addr = b;
+}
+static inline void __writew(__u16 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u16 *)addr = b;
+}
+#define writeq(val,addr) __writeq((val),(addr))
+#define writel(val,addr) __writel((val),(addr))
+#define writew(val,addr) __writew((val),(addr))
+#define writeb(val,addr) __writeb((val),(addr))
+#define __raw_writeb writeb
+#define __raw_writew writew
+#define __raw_writel writel
+#define __raw_writeq writeq
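+
+/*
+ * Usage sketch (illustrative only; dev_phys_addr and the register offset
+ * are made up):
+ *
+ *	void __iomem *regs = ioremap_nocache(dev_phys_addr, 0x1000);
+ *	u32 status = readl(regs + 0x10);
+ *	writel(status | 0x1, regs + 0x10);
+ *	iounmap(regs);
+ */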
+
+void __memcpy_fromio(void*,unsigned long,unsigned);
+void __memcpy_toio(unsigned long,const void*,unsigned);
+
+static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
+{
+ __memcpy_fromio(to,(unsigned long)from,len);
+}
+static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
+{
+ __memcpy_toio((unsigned long)to,from,len);
+}
+
+void memset_io(volatile void __iomem *a, int b, size_t c);
+
+/*
+ * ISA space is 'always mapped' on a typical x86 system, no need to
+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
+ * are physical addresses. The following constant pointer can be
+ * used as the IO-area pointer (it can be iounmapped as well, so the
+ * analogy with PCI is quite large):
+ */
+#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
+
+/*
+ * Again, x86-64 does not require mem IO specific function.
+ */
+
+#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d))
+
+/**
+ * check_signature - find BIOS signatures
+ * @io_addr: mmio address to check
+ * @signature: signature block
+ * @length: length of signature
+ *
+ * Perform a signature comparison with the mmio address io_addr. This
+ * address should have been obtained by ioremap.
+ * Returns 1 on a match.
+ */
+
+static inline int check_signature(void __iomem *io_addr,
+ const unsigned char *signature, int length)
+{
+ int retval = 0;
+ do {
+ if (readb(io_addr) != *signature)
+ goto out;
+ io_addr++;
+ signature++;
+ length--;
+ } while (length);
+ retval = 1;
+out:
+ return retval;
+}
+
+/* Nothing to do */
+
+#define dma_cache_inv(_start,_size) do { } while (0)
+#define dma_cache_wback(_start,_size) do { } while (0)
+#define dma_cache_wback_inv(_start,_size) do { } while (0)
+
+#define flush_write_buffers()
+
+extern int iommu_bio_merge;
+#define BIO_VMERGE_BOUNDARY iommu_bio_merge
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p) __va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p) p
+
+#endif /* __KERNEL__ */
+
+#endif
--- /dev/null
+#ifndef __ASM_IO_APIC_H
+#define __ASM_IO_APIC_H
+
+#include <lwk/spinlock.h>
+#include <arch/types.h>
+#include <arch/mpspec.h>
+
+/*
+ * Intel IO-APIC support for SMP and UP systems.
+ *
+ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
+ */
+
+static inline int use_pci_vector(void) {return 1;}
+static inline void disable_edge_ioapic_vector(unsigned int vector) { }
+static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { }
+static inline void end_edge_ioapic_vector (unsigned int vector) { }
+#define startup_level_ioapic startup_level_ioapic_vector
+#define shutdown_level_ioapic mask_IO_APIC_vector
+#define enable_level_ioapic unmask_IO_APIC_vector
+#define disable_level_ioapic mask_IO_APIC_vector
+#define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_vector
+#define end_level_ioapic end_level_ioapic_vector
+#define set_ioapic_affinity set_ioapic_affinity_vector
+
+#define startup_edge_ioapic startup_edge_ioapic_vector
+#define shutdown_edge_ioapic disable_edge_ioapic_vector
+#define enable_edge_ioapic unmask_IO_APIC_vector
+#define disable_edge_ioapic disable_edge_ioapic_vector
+#define ack_edge_ioapic ack_edge_ioapic_vector
+#define end_edge_ioapic end_edge_ioapic_vector
+
+#define APIC_MISMATCH_DEBUG
+
+#define IO_APIC_BASE(idx) \
+ ((volatile int *)(__fix_to_virt(FIX_IO_APIC_BASE_0 + idx) \
+ + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK)))
+
+/*
+ * The structure of the IO-APIC:
+ */
+union IO_APIC_reg_00 {
+ u32 raw;
+ struct {
+ u32 __reserved_2 : 14,
+ LTS : 1,
+ delivery_type : 1,
+ __reserved_1 : 8,
+ ID : 8;
+ } __attribute__ ((packed)) bits;
+};
+
+union IO_APIC_reg_01 {
+ u32 raw;
+ struct {
+ u32 version : 8,
+ __reserved_2 : 7,
+ PRQ : 1,
+ entries : 8,
+ __reserved_1 : 8;
+ } __attribute__ ((packed)) bits;
+};
+
+union IO_APIC_reg_02 {
+ u32 raw;
+ struct {
+ u32 __reserved_2 : 24,
+ arbitration : 4,
+ __reserved_1 : 4;
+ } __attribute__ ((packed)) bits;
+};
+
+union IO_APIC_reg_03 {
+ u32 raw;
+ struct {
+ u32 boot_DT : 1,
+ __reserved_1 : 31;
+ } __attribute__ ((packed)) bits;
+};
+
+/*
+ * # of IO-APICs and # of IRQ routing registers
+ */
+extern int nr_ioapics;
+extern int nr_ioapic_registers[MAX_IO_APICS];
+
+enum ioapic_trigger_modes {
+ ioapic_edge_sensitive = 0,
+ ioapic_level_sensitive = 1
+};
+
+enum ioapic_pin_polarities {
+ ioapic_active_high = 0,
+ ioapic_active_low = 1
+};
+
+enum ioapic_destination_modes {
+ ioapic_physical_dest = 0,
+ ioapic_logical_dest = 1
+};
+
+enum ioapic_delivery_modes {
+ ioapic_fixed = 0,
+ ioapic_lowest_priority = 1,
+ ioapic_SMI = 2,
+ ioapic_NMI = 4,
+ ioapic_INIT = 5,
+ ioapic_ExtINT = 7
+};
+
+struct IO_APIC_route_entry {
+ __u32 vector : 8,
+ delivery_mode : 3, /* 000: FIXED
+ * 001: lowest prio
+ * 111: ExtINT
+ */
+ dest_mode : 1, /* 0: physical, 1: logical */
+ delivery_status : 1,
+ polarity : 1,
+ irr : 1,
+ trigger : 1, /* 0: edge, 1: level */
+ mask : 1, /* 0: enabled, 1: disabled */
+ __reserved_2 : 15;
+
+ __u32 __reserved_3 : 24,
+ dest : 8;
+} __attribute__ ((packed));
+
+/*
+ * MP-BIOS irq configuration table structures:
+ */
+
+/* I/O APIC entries */
+extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
+
+/* # of MP IRQ source entries */
+extern int mp_irq_entries;
+
+/* MP IRQ source entries */
+extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+
+/* non-0 if default (table-less) MP configuration */
+extern int mpc_default_type;
+
+static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
+{
+ *IO_APIC_BASE(apic) = reg;
+ return *(IO_APIC_BASE(apic)+4);
+}
+
+static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+{
+ *IO_APIC_BASE(apic) = reg;
+ *(IO_APIC_BASE(apic)+4) = value;
+}
+
+/*
+ * Re-write a value: to be used for read-modify-write
+ * cycles where the read already set up the index register.
+ */
+static inline void io_apic_modify(unsigned int apic, unsigned int value)
+{
+ *(IO_APIC_BASE(apic)+4) = value;
+}
+
+/*
+ * Synchronize the IO-APIC and the CPU by doing
+ * a dummy read from the IO-APIC
+ */
+static inline void io_apic_sync(unsigned int apic)
+{
+ (void) *(IO_APIC_BASE(apic)+4);
+}
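+/*
+ * Usage sketch (not part of the original header): mask redirection
+ * entry 'pin' on IO-APIC 'apic' with a read-modify-write of the low
+ * dword. In real code this must be done under the appropriate IRQ lock.
+ *
+ *	union { struct IO_APIC_route_entry entry; u32 raw[2]; } u;
+ *	u.raw[0] = io_apic_read(apic, 0x10 + 2 * pin);
+ *	u.entry.mask = 1;		// 1 == disabled
+ *	io_apic_modify(apic, u.raw[0]);	// index register already set up
+ *	io_apic_sync(apic);
+ */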
+
+/* 1 if "noapic" boot option passed */
+extern int skip_ioapic_setup;
+
+/*
+ * If we use the IO-APIC for IRQ routing, disable automatic
+ * assignment of PCI IRQ's.
+ */
+#define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
+
+#ifdef CONFIG_ACPI
+extern int io_apic_get_version (int ioapic);
+extern int io_apic_get_redir_entries (int ioapic);
+extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int, int);
+extern int timer_uses_ioapic_pin_0;
+#endif
+
+extern int sis_apic_bug; /* dummy */
+
+extern int assign_irq_vector(int irq);
+
+void enable_NMI_through_LVT0 (void * dummy);
+
+extern spinlock_t i8259A_lock;
+
+extern unsigned int ioapic_num;
+extern unsigned int ioapic_id[MAX_IO_APICS];
+extern unsigned long ioapic_phys_addr[MAX_IO_APICS];
+
+extern void __init ioapic_map(void);
+extern void __init ioapic_init(void);
+extern void ioapic_dump(void);
+
+
+#endif
--- /dev/null
+/*
+ * ldt.h
+ *
+ * Definitions of structures used with the modify_ldt system call.
+ */
+#ifndef _LINUX_LDT_H
+#define _LINUX_LDT_H
+
+/* Maximum number of LDT entries supported. */
+#define LDT_ENTRIES 8192
+/* The size of each LDT entry. */
+#define LDT_ENTRY_SIZE 8
+
+#ifndef __ASSEMBLY__
+/* Note: on 64-bit, the base and limit are ignored, and DS/ES/CS cannot
+   be set to non-default values if you still want to make syscalls. This
+   call is therefore mainly useful for 32-bit mode. */
+struct user_desc {
+ unsigned int entry_number;
+ unsigned int base_addr;
+ unsigned int limit;
+ unsigned int seg_32bit:1;
+ unsigned int contents:2;
+ unsigned int read_exec_only:1;
+ unsigned int limit_in_pages:1;
+ unsigned int seg_not_present:1;
+ unsigned int useable:1;
+ unsigned int lm:1;
+};
+
+#define MODIFY_LDT_CONTENTS_DATA 0
+#define MODIFY_LDT_CONTENTS_STACK 1
+#define MODIFY_LDT_CONTENTS_CODE 2
+
+#endif /* !__ASSEMBLY__ */
+#endif
--- /dev/null
+#ifndef _ARCH_LINKAGE_H
+#define _ARCH_LINKAGE_H
+
+/* Nothing to see here... */
+
+#endif
--- /dev/null
+#ifndef __X8664_MMAN_H__
+#define __X8664_MMAN_H__
+
+#include <arch-generic/mman.h>
+
+#define MAP_32BIT 0x40 /* only give out 32bit addresses */
+
+#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
+#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
+#define MAP_LOCKED 0x2000 /* pages are locked */
+#define MAP_NORESERVE 0x4000 /* don't check for reservations */
+#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
+#define MAP_NONBLOCK 0x10000 /* do not block on IO */
+
+#define MCL_CURRENT 1 /* lock all current mappings */
+#define MCL_FUTURE 2 /* lock all future mappings */
+
+#endif
--- /dev/null
+#ifndef _X86_64_MMU_H
+#define _X86_64_MMU_H
+
+#include <lwk/spinlock.h>
+// #include <asm/semaphore.h>
+
+/*
+ * The x86_64 doesn't have a mmu context, but
+ * we put the segment information here.
+ */
+typedef struct {
+ void *ldt;
+ rwlock_t ldtlock;
+ int size;
+// struct semaphore sem;
+} mm_context_t;
+
+#endif
--- /dev/null
+#ifndef __ASM_MPSPEC_H
+#define __ASM_MPSPEC_H
+
+#include <lwk/kernel.h>
+
+/*
+ * Structure definitions for SMP machines following the
+ * Intel Multiprocessing Specification 1.1 and 1.4.
+ */
+
+/*
+ * This tag identifies where the SMP configuration
+ * information is.
+ */
+
+#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_')
+
+/*
+ * A maximum of 255 APICs with the current APIC ID architecture.
+ */
+#define MAX_APICS 255
+
+struct intel_mp_floating
+{
+ char mpf_signature[4]; /* "_MP_" */
+ unsigned int mpf_physptr; /* Configuration table address */
+ unsigned char mpf_length; /* Our length (paragraphs) */
+ unsigned char mpf_specification;/* Specification version */
+ unsigned char mpf_checksum; /* Checksum (makes sum 0) */
+ unsigned char mpf_feature1; /* Standard or configuration ? */
+ unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */
+ unsigned char mpf_feature3; /* Unused (0) */
+ unsigned char mpf_feature4; /* Unused (0) */
+ unsigned char mpf_feature5; /* Unused (0) */
+};
+sizecheck_struct(intel_mp_floating, 16);
+
+struct mp_config_table
+{
+ char mpc_signature[4];
+#define MPC_SIGNATURE "PCMP"
+ unsigned short mpc_length; /* Size of table */
+ char mpc_spec; /* 0x01 */
+ char mpc_checksum;
+ char mpc_oem[8];
+ char mpc_productid[12];
+ unsigned int mpc_oemptr; /* 0 if not present */
+ unsigned short mpc_oemsize; /* 0 if not present */
+ unsigned short mpc_oemcount;
+ unsigned int mpc_lapic; /* APIC address */
+ unsigned int reserved;
+};
+
+/* Followed by entries */
+
+#define MP_PROCESSOR 0
+#define MP_BUS 1
+#define MP_IOAPIC 2
+#define MP_INTSRC 3
+#define MP_LINTSRC 4
+
+struct mpc_config_processor
+{
+ unsigned char mpc_type;
+ unsigned char mpc_apicid; /* Local APIC number */
+ unsigned char mpc_apicver; /* Its version */
+ unsigned char mpc_cpuflag;
+#define CPU_ENABLED 1 /* Processor is available */
+#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */
+ unsigned int mpc_cpufeature;
+#define CPU_STEPPING_MASK 0x0F
+#define CPU_MODEL_MASK 0xF0
+#define CPU_FAMILY_MASK 0xF00
+ unsigned int mpc_featureflag; /* CPUID feature value */
+ unsigned int mpc_reserved[2];
+};
+
+struct mpc_config_bus
+{
+ unsigned char mpc_type;
+ unsigned char mpc_busid;
+ unsigned char mpc_bustype[6];
+};
+
+/* List of Bus Type string values, Intel MP Spec. */
+#define BUSTYPE_EISA "EISA"
+#define BUSTYPE_ISA "ISA"
+#define BUSTYPE_INTERN "INTERN" /* Internal BUS */
+#define BUSTYPE_MCA "MCA"
+#define BUSTYPE_VL "VL" /* Local bus */
+#define BUSTYPE_PCI "PCI"
+#define BUSTYPE_PCMCIA "PCMCIA"
+#define BUSTYPE_CBUS "CBUS"
+#define BUSTYPE_CBUSII "CBUSII"
+#define BUSTYPE_FUTURE "FUTURE"
+#define BUSTYPE_MBI "MBI"
+#define BUSTYPE_MBII "MBII"
+#define BUSTYPE_MPI "MPI"
+#define BUSTYPE_MPSA "MPSA"
+#define BUSTYPE_NUBUS "NUBUS"
+#define BUSTYPE_TC "TC"
+#define BUSTYPE_VME "VME"
+#define BUSTYPE_XPRESS "XPRESS"
+
+struct mpc_config_ioapic
+{
+ unsigned char mpc_type;
+ unsigned char mpc_apicid;
+ unsigned char mpc_apicver;
+ unsigned char mpc_flags;
+#define MPC_APIC_USABLE 0x01
+ unsigned int mpc_apicaddr;
+};
+
+struct mpc_config_intsrc
+{
+ unsigned char mpc_type;
+ unsigned char mpc_irqtype;
+ unsigned short mpc_irqflag;
+ unsigned char mpc_srcbus;
+ unsigned char mpc_srcbusirq;
+ unsigned char mpc_dstapic;
+ unsigned char mpc_dstirq;
+};
+
+enum mp_irq_source_types {
+ mp_INT = 0,
+ mp_NMI = 1,
+ mp_SMI = 2,
+ mp_ExtINT = 3
+};
+
+#define MP_IRQDIR_DEFAULT 0
+#define MP_IRQDIR_HIGH 1
+#define MP_IRQDIR_LOW 3
+
+
+struct mpc_config_lintsrc
+{
+ unsigned char mpc_type;
+ unsigned char mpc_irqtype;
+ unsigned short mpc_irqflag;
+ unsigned char mpc_srcbusid;
+ unsigned char mpc_srcbusirq;
+ unsigned char mpc_destapic;
+#define MP_APIC_ALL 0xFF
+ unsigned char mpc_destapiclint;
+};
+
+/*
+ * Default configurations
+ *
+ * 1 2 CPU ISA 82489DX
+ * 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining
+ * 3 2 CPU EISA 82489DX
+ * 4 2 CPU MCA 82489DX
+ * 5 2 CPU ISA+PCI
+ * 6 2 CPU EISA+PCI
+ * 7 2 CPU MCA+PCI
+ */
+
+#define MAX_MP_BUSSES 256
+/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */
+#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
+enum mp_bustype {
+ MP_BUS_ISA = 1,
+ MP_BUS_EISA,
+ MP_BUS_PCI,
+ MP_BUS_MCA
+};
+extern unsigned char mp_bus_id_to_type [MAX_MP_BUSSES];
+extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
+
+extern unsigned int boot_cpu_physical_apicid;
+extern int smp_found_config;
+extern void find_mp_config(void);
+extern void get_mp_config(void);
+extern int nr_ioapics;
+extern unsigned char apic_version [MAX_APICS];
+extern int mp_irq_entries;
+extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
+extern int mpc_default_type;
+extern unsigned long mp_lapic_addr;
+extern int pic_mode;
+
+#ifdef CONFIG_ACPI
+extern void mp_register_lapic (u8 id, u8 enabled);
+extern void mp_register_lapic_address (u64 address);
+
+#ifdef CONFIG_X86_IO_APIC
+extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base);
+extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi);
+extern void mp_config_acpi_legacy_irqs (void);
+extern int mp_register_gsi (u32 gsi, int triggering, int polarity);
+#endif /*CONFIG_X86_IO_APIC*/
+#endif
+
+extern int using_apic_timer;
+
+#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS)
+
+struct physid_mask
+{
+ unsigned long mask[PHYSID_ARRAY_SIZE];
+};
+
+typedef struct physid_mask physid_mask_t;
+
+#define physid_set(physid, map) set_bit(physid, (map).mask)
+#define physid_clear(physid, map) clear_bit(physid, (map).mask)
+#define physid_isset(physid, map) test_bit(physid, (map).mask)
+#define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask)
+
+#define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
+#define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
+#define physids_clear(map) bitmap_zero((map).mask, MAX_APICS)
+#define physids_complement(dst, src) bitmap_complement((dst).mask, (src).mask, MAX_APICS)
+#define physids_empty(map) bitmap_empty((map).mask, MAX_APICS)
+#define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS)
+#define physids_weight(map) bitmap_weight((map).mask, MAX_APICS)
+#define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)
+#define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
+#define physids_coerce(map) ((map).mask[0])
+
+#define physids_promote(physids) \
+ ({ \
+ physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
+ __physid_mask.mask[0] = physids; \
+ __physid_mask; \
+ })
+
+#define physid_mask_of_physid(physid) \
+ ({ \
+ physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
+ physid_set(physid, __physid_mask); \
+ __physid_mask; \
+ })
+
+#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
+#define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }
+
+extern physid_mask_t phys_cpu_present_map;
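+/*
+ * Usage sketch (not part of the original header; 'apicid' is a
+ * hypothetical APIC ID parsed from an MP-table entry):
+ *
+ *	physid_mask_t tmp = PHYSID_MASK_NONE;
+ *	physid_set(apicid, tmp);
+ *	physids_or(phys_cpu_present_map, phys_cpu_present_map, tmp);
+ */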
+
+#endif
+
--- /dev/null
+#ifndef _X86_64_MSR_H
+#define _X86_64_MSR_H
+
+#ifndef __ASSEMBLY__
+/*
+ * Access to model-specific registers (MSRs), available on 586 and better only.
+ * Note: the rd* operations modify the parameters directly (without using
+ * pointer indirection), which allows gcc to optimize better.
+ */
+
+#define rdmsr(msr,val1,val2) \
+ __asm__ __volatile__("rdmsr" \
+ : "=a" (val1), "=d" (val2) \
+ : "c" (msr))
+
+
+#define rdmsrl(msr,val) do { unsigned long a__,b__; \
+ __asm__ __volatile__("rdmsr" \
+ : "=a" (a__), "=d" (b__) \
+ : "c" (msr)); \
+ val = a__ | (b__<<32); \
+} while(0)
+
+#define wrmsr(msr,val1,val2) \
+ __asm__ __volatile__("wrmsr" \
+ : /* no outputs */ \
+ : "c" (msr), "a" (val1), "d" (val2))
+
+#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
+
+/* wrmsr with exception handling */
+#define wrmsr_safe(msr,a,b) ({ int ret__; \
+ asm volatile("2: wrmsr ; xorl %0,%0\n" \
+ "1:\n\t" \
+ ".section .fixup,\"ax\"\n\t" \
+ "3: movl %4,%0 ; jmp 1b\n\t" \
+ ".previous\n\t" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 8\n\t" \
+ " .quad 2b,3b\n\t" \
+ ".previous" \
+ : "=a" (ret__) \
+ : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
+ ret__; })
+
+#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
+
+#define rdmsr_safe(msr,a,b) \
+ ({ int ret__; \
+ asm volatile ("1: rdmsr\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: movl %4,%0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 8\n" \
+ " .quad 1b,3b\n" \
+ ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\
+ :"c"(msr), "i"(-EIO), "0"(0)); \
+ ret__; })
+
+#define rdtsc(low,high) \
+ __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+
+#define rdtscl(low) \
+ __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
+
+#define rdtscll(val) do { \
+ unsigned int __a,__d; \
+ asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+ (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
+} while(0)
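+/*
+ * Usage sketch (not part of the original header; do_work() stands in
+ * for the code being timed):
+ *
+ *	unsigned long t0, t1;
+ *	rdtscll(t0);
+ *	do_work();
+ *	rdtscll(t1);
+ *	printk("elapsed cycles: %lu\n", t1 - t0);
+ */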
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define rdpmc(counter,low,high) \
+ __asm__ __volatile__("rdpmc" \
+ : "=a" (low), "=d" (high) \
+ : "c" (counter))
+
+static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
+{
+ __asm__("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "0" (op));
+}
+
+/* Some CPUID calls want 'count' to be placed in ecx */
+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
+ int *edx)
+{
+ __asm__("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "0" (op), "c" (count));
+}
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+ unsigned int eax;
+
+ __asm__("cpuid"
+ : "=a" (eax)
+ : "0" (op)
+ : "bx", "cx", "dx");
+ return eax;
+}
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+ unsigned int eax, ebx;
+
+ __asm__("cpuid"
+ : "=a" (eax), "=b" (ebx)
+ : "0" (op)
+ : "cx", "dx" );
+ return ebx;
+}
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+ unsigned int eax, ecx;
+
+ __asm__("cpuid"
+ : "=a" (eax), "=c" (ecx)
+ : "0" (op)
+ : "bx", "dx" );
+ return ecx;
+}
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+ unsigned int eax, edx;
+
+ __asm__("cpuid"
+ : "=a" (eax), "=d" (edx)
+ : "0" (op)
+ : "bx", "cx");
+ return edx;
+}
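+/*
+ * Usage sketch (not part of the original header): CPUID leaf 0 returns
+ * the 12-byte vendor string in EBX, EDX, ECX (in that order).
+ *
+ *	unsigned int eax, ebx, ecx, edx;
+ *	char vendor[13];
+ *	cpuid(0, &eax, &ebx, &ecx, &edx);
+ *	memcpy(vendor + 0, &ebx, 4);
+ *	memcpy(vendor + 4, &edx, 4);
+ *	memcpy(vendor + 8, &ecx, 4);
+ *	vendor[12] = '\0';
+ */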
+
+#define MSR_IA32_UCODE_WRITE 0x79
+#define MSR_IA32_UCODE_REV 0x8b
+
+
+#endif
+
+/* AMD/K8 specific MSRs */
+#define MSR_EFER 0xc0000080 /* extended feature register */
+#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
+#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
+#define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */
+#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
+#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
+#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
+#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow (or USER_GS from kernel) */
+/* EFER bits: */
+#define _EFER_SCE 0 /* SYSCALL/SYSRET */
+#define _EFER_LME 8 /* Long mode enable */
+#define _EFER_LMA 10 /* Long mode active (read-only) */
+#define _EFER_NX 11 /* No execute enable */
+
+#define EFER_SCE (1<<_EFER_SCE)
+#define EFER_LME (1<<_EFER_LME)
+#define EFER_LMA (1<<_EFER_LMA)
+#define EFER_NX (1<<_EFER_NX)
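+/*
+ * Usage sketch (not part of the original header): test whether long
+ * mode is active by reading EFER.LMA.
+ *
+ *	unsigned long efer;
+ *	rdmsrl(MSR_EFER, efer);
+ *	if (efer & EFER_LMA)
+ *		printk("long mode is active\n");
+ */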
+
+/* Intel MSRs. Some also available on other CPUs */
+#define MSR_IA32_TSC 0x10
+#define MSR_IA32_PLATFORM_ID 0x17
+
+#define MSR_IA32_PERFCTR0 0xc1
+#define MSR_IA32_PERFCTR1 0xc2
+
+#define MSR_MTRRcap 0x0fe
+#define MSR_IA32_BBL_CR_CTL 0x119
+
+#define MSR_IA32_SYSENTER_CS 0x174
+#define MSR_IA32_SYSENTER_ESP 0x175
+#define MSR_IA32_SYSENTER_EIP 0x176
+
+#define MSR_IA32_MCG_CAP 0x179
+#define MSR_IA32_MCG_STATUS 0x17a
+#define MSR_IA32_MCG_CTL 0x17b
+
+#define MSR_IA32_EVNTSEL0 0x186
+#define MSR_IA32_EVNTSEL1 0x187
+
+#define MSR_IA32_DEBUGCTLMSR 0x1d9
+#define MSR_IA32_LASTBRANCHFROMIP 0x1db
+#define MSR_IA32_LASTBRANCHTOIP 0x1dc
+#define MSR_IA32_LASTINTFROMIP 0x1dd
+#define MSR_IA32_LASTINTTOIP 0x1de
+
+#define MSR_MTRRfix64K_00000 0x250
+#define MSR_MTRRfix16K_80000 0x258
+#define MSR_MTRRfix16K_A0000 0x259
+#define MSR_MTRRfix4K_C0000 0x268
+#define MSR_MTRRfix4K_C8000 0x269
+#define MSR_MTRRfix4K_D0000 0x26a
+#define MSR_MTRRfix4K_D8000 0x26b
+#define MSR_MTRRfix4K_E0000 0x26c
+#define MSR_MTRRfix4K_E8000 0x26d
+#define MSR_MTRRfix4K_F0000 0x26e
+#define MSR_MTRRfix4K_F8000 0x26f
+#define MSR_MTRRdefType 0x2ff
+
+#define MSR_IA32_MC0_CTL 0x400
+#define MSR_IA32_MC0_STATUS 0x401
+#define MSR_IA32_MC0_ADDR 0x402
+#define MSR_IA32_MC0_MISC 0x403
+
+#define MSR_P6_PERFCTR0 0xc1
+#define MSR_P6_PERFCTR1 0xc2
+#define MSR_P6_EVNTSEL0 0x186
+#define MSR_P6_EVNTSEL1 0x187
+
+/* K7/K8 MSRs. Not complete. See the architecture manual for a more complete list. */
+#define MSR_K7_EVNTSEL0 0xC0010000
+#define MSR_K7_PERFCTR0 0xC0010004
+#define MSR_K7_EVNTSEL1 0xC0010001
+#define MSR_K7_PERFCTR1 0xC0010005
+#define MSR_K7_EVNTSEL2 0xC0010002
+#define MSR_K7_PERFCTR2 0xC0010006
+#define MSR_K7_EVNTSEL3 0xC0010003
+#define MSR_K7_PERFCTR3 0xC0010007
+#define MSR_K8_TOP_MEM1 0xC001001A
+#define MSR_K8_TOP_MEM2 0xC001001D
+#define MSR_K8_SYSCFG 0xC0010010
+#define MSR_K8_HWCR 0xC0010015
+#define MSR_K8_FIDVID_STATUS 0xC0010042
+
+/* AMD K10 MSRs */
+#define MSR_K10_COFVID_STATUS 0xC0010071
+
+/* K6 MSRs */
+#define MSR_K6_EFER 0xC0000080
+#define MSR_K6_STAR 0xC0000081
+#define MSR_K6_WHCR 0xC0000082
+#define MSR_K6_UWCCR 0xC0000085
+#define MSR_K6_PSOR 0xC0000087
+#define MSR_K6_PFIR 0xC0000088
+
+/* Centaur-Hauls/IDT defined MSRs. */
+#define MSR_IDT_FCR1 0x107
+#define MSR_IDT_FCR2 0x108
+#define MSR_IDT_FCR3 0x109
+#define MSR_IDT_FCR4 0x10a
+
+#define MSR_IDT_MCR0 0x110
+#define MSR_IDT_MCR1 0x111
+#define MSR_IDT_MCR2 0x112
+#define MSR_IDT_MCR3 0x113
+#define MSR_IDT_MCR4 0x114
+#define MSR_IDT_MCR5 0x115
+#define MSR_IDT_MCR6 0x116
+#define MSR_IDT_MCR7 0x117
+#define MSR_IDT_MCR_CTRL 0x120
+
+/* VIA Cyrix defined MSRs*/
+#define MSR_VIA_FCR 0x1107
+#define MSR_VIA_LONGHAUL 0x110a
+#define MSR_VIA_RNG 0x110b
+#define MSR_VIA_BCR2 0x1147
+
+/* Intel defined MSRs. */
+#define MSR_IA32_P5_MC_ADDR 0
+#define MSR_IA32_P5_MC_TYPE 1
+#define MSR_IA32_PLATFORM_ID 0x17
+#define MSR_IA32_EBL_CR_POWERON 0x2a
+
+#define MSR_IA32_APICBASE 0x1b
+#define MSR_IA32_APICBASE_BSP (1<<8)
+#define MSR_IA32_APICBASE_ENABLE (1<<11)
+#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+
+/* P4/Xeon+ specific */
+#define MSR_IA32_MCG_EAX 0x180
+#define MSR_IA32_MCG_EBX 0x181
+#define MSR_IA32_MCG_ECX 0x182
+#define MSR_IA32_MCG_EDX 0x183
+#define MSR_IA32_MCG_ESI 0x184
+#define MSR_IA32_MCG_EDI 0x185
+#define MSR_IA32_MCG_EBP 0x186
+#define MSR_IA32_MCG_ESP 0x187
+#define MSR_IA32_MCG_EFLAGS 0x188
+#define MSR_IA32_MCG_EIP 0x189
+#define MSR_IA32_MCG_RESERVED 0x18A
+
+#define MSR_P6_EVNTSEL0 0x186
+#define MSR_P6_EVNTSEL1 0x187
+
+#define MSR_IA32_PERF_STATUS 0x198
+#define MSR_IA32_PERF_CTL 0x199
+
+#define MSR_IA32_THERM_CONTROL 0x19a
+#define MSR_IA32_THERM_INTERRUPT 0x19b
+#define MSR_IA32_THERM_STATUS 0x19c
+#define MSR_IA32_MISC_ENABLE 0x1a0
+
+#define MSR_IA32_DEBUGCTLMSR 0x1d9
+#define MSR_IA32_LASTBRANCHFROMIP 0x1db
+#define MSR_IA32_LASTBRANCHTOIP 0x1dc
+#define MSR_IA32_LASTINTFROMIP 0x1dd
+#define MSR_IA32_LASTINTTOIP 0x1de
+
+#define MSR_IA32_MC0_CTL 0x400
+#define MSR_IA32_MC0_STATUS 0x401
+#define MSR_IA32_MC0_ADDR 0x402
+#define MSR_IA32_MC0_MISC 0x403
+
+/* Pentium IV performance counter MSRs */
+#define MSR_P4_BPU_PERFCTR0 0x300
+#define MSR_P4_BPU_PERFCTR1 0x301
+#define MSR_P4_BPU_PERFCTR2 0x302
+#define MSR_P4_BPU_PERFCTR3 0x303
+#define MSR_P4_MS_PERFCTR0 0x304
+#define MSR_P4_MS_PERFCTR1 0x305
+#define MSR_P4_MS_PERFCTR2 0x306
+#define MSR_P4_MS_PERFCTR3 0x307
+#define MSR_P4_FLAME_PERFCTR0 0x308
+#define MSR_P4_FLAME_PERFCTR1 0x309
+#define MSR_P4_FLAME_PERFCTR2 0x30a
+#define MSR_P4_FLAME_PERFCTR3 0x30b
+#define MSR_P4_IQ_PERFCTR0 0x30c
+#define MSR_P4_IQ_PERFCTR1 0x30d
+#define MSR_P4_IQ_PERFCTR2 0x30e
+#define MSR_P4_IQ_PERFCTR3 0x30f
+#define MSR_P4_IQ_PERFCTR4 0x310
+#define MSR_P4_IQ_PERFCTR5 0x311
+#define MSR_P4_BPU_CCCR0 0x360
+#define MSR_P4_BPU_CCCR1 0x361
+#define MSR_P4_BPU_CCCR2 0x362
+#define MSR_P4_BPU_CCCR3 0x363
+#define MSR_P4_MS_CCCR0 0x364
+#define MSR_P4_MS_CCCR1 0x365
+#define MSR_P4_MS_CCCR2 0x366
+#define MSR_P4_MS_CCCR3 0x367
+#define MSR_P4_FLAME_CCCR0 0x368
+#define MSR_P4_FLAME_CCCR1 0x369
+#define MSR_P4_FLAME_CCCR2 0x36a
+#define MSR_P4_FLAME_CCCR3 0x36b
+#define MSR_P4_IQ_CCCR0 0x36c
+#define MSR_P4_IQ_CCCR1 0x36d
+#define MSR_P4_IQ_CCCR2 0x36e
+#define MSR_P4_IQ_CCCR3 0x36f
+#define MSR_P4_IQ_CCCR4 0x370
+#define MSR_P4_IQ_CCCR5 0x371
+#define MSR_P4_ALF_ESCR0 0x3ca
+#define MSR_P4_ALF_ESCR1 0x3cb
+#define MSR_P4_BPU_ESCR0 0x3b2
+#define MSR_P4_BPU_ESCR1 0x3b3
+#define MSR_P4_BSU_ESCR0 0x3a0
+#define MSR_P4_BSU_ESCR1 0x3a1
+#define MSR_P4_CRU_ESCR0 0x3b8
+#define MSR_P4_CRU_ESCR1 0x3b9
+#define MSR_P4_CRU_ESCR2 0x3cc
+#define MSR_P4_CRU_ESCR3 0x3cd
+#define MSR_P4_CRU_ESCR4 0x3e0
+#define MSR_P4_CRU_ESCR5 0x3e1
+#define MSR_P4_DAC_ESCR0 0x3a8
+#define MSR_P4_DAC_ESCR1 0x3a9
+#define MSR_P4_FIRM_ESCR0 0x3a4
+#define MSR_P4_FIRM_ESCR1 0x3a5
+#define MSR_P4_FLAME_ESCR0 0x3a6
+#define MSR_P4_FLAME_ESCR1 0x3a7
+#define MSR_P4_FSB_ESCR0 0x3a2
+#define MSR_P4_FSB_ESCR1 0x3a3
+#define MSR_P4_IQ_ESCR0 0x3ba
+#define MSR_P4_IQ_ESCR1 0x3bb
+#define MSR_P4_IS_ESCR0 0x3b4
+#define MSR_P4_IS_ESCR1 0x3b5
+#define MSR_P4_ITLB_ESCR0 0x3b6
+#define MSR_P4_ITLB_ESCR1 0x3b7
+#define MSR_P4_IX_ESCR0 0x3c8
+#define MSR_P4_IX_ESCR1 0x3c9
+#define MSR_P4_MOB_ESCR0 0x3aa
+#define MSR_P4_MOB_ESCR1 0x3ab
+#define MSR_P4_MS_ESCR0 0x3c0
+#define MSR_P4_MS_ESCR1 0x3c1
+#define MSR_P4_PMH_ESCR0 0x3ac
+#define MSR_P4_PMH_ESCR1 0x3ad
+#define MSR_P4_RAT_ESCR0 0x3bc
+#define MSR_P4_RAT_ESCR1 0x3bd
+#define MSR_P4_SAAT_ESCR0 0x3ae
+#define MSR_P4_SAAT_ESCR1 0x3af
+#define MSR_P4_SSU_ESCR0 0x3be
+#define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */
+#define MSR_P4_TBPU_ESCR0 0x3c2
+#define MSR_P4_TBPU_ESCR1 0x3c3
+#define MSR_P4_TC_ESCR0 0x3c4
+#define MSR_P4_TC_ESCR1 0x3c5
+#define MSR_P4_U2L_ESCR0 0x3b0
+#define MSR_P4_U2L_ESCR1 0x3b1
+
+#endif
--- /dev/null
+#ifndef _X86_64_PAGE_H
+#define _X86_64_PAGE_H
+
+#include <lwk/const.h>
+
+/**
+ * PAGE_SHIFT defines the base page size, 4 KB (4096 bytes) on x86_64.
+ */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
+
+/**
+ * The kernel is mapped into the virtual address space of every task:
+ *
+ * [PAGE_OFFSET, TOP_OF_MEMORY) Kernel-space virtual memory region
+ * [0, PAGE_OFFSET] User-space virtual memory region
+ */
+#define PAGE_OFFSET _AC(0xffff810000000000, UL)
+
+/**
+ * The bootloader loads the LWK at address __PHYSICAL_START in physical memory.
+ * This must be aligned on a 2 MB page boundary... or else.
+ */
+#define __PHYSICAL_START CONFIG_PHYSICAL_START
+#define __KERNEL_ALIGN 0x200000
+
+/**
+ * If you hit this error when compiling the LWK, change your config file so that
+ * CONFIG_PHYSICAL_START is aligned to a 2 MB boundary.
+ */
+#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
+#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
+#endif
+
+/**
+ * The kernel page tables map the kernel image text and data starting at
+ * virtual address __START_KERNEL_map. The kernel text starts at
+ * __START_KERNEL.
+ */
+#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
+#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
+
+/* See Documentation/x86_64/mm.txt for a description of the memory map. */
+#define __PHYSICAL_MASK_SHIFT 46
+#define __PHYSICAL_MASK ((_AC(1,UL) << __PHYSICAL_MASK_SHIFT) - 1)
+#define __VIRTUAL_MASK_SHIFT 48
+#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)
+
+#define TASK_ORDER 1
+#define TASK_SIZE (PAGE_SIZE << TASK_ORDER)
+#define CURRENT_MASK (~(TASK_SIZE-1))
+
+#define EXCEPTION_STACK_ORDER 0
+#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
+
+#define DEBUG_STACK_ORDER EXCEPTION_STACK_ORDER
+#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
+
+#define IRQSTACK_ORDER 2
+#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
+
+#define STACKFAULT_STACK 1
+#define DOUBLEFAULT_STACK 2
+#define NMI_STACK 3
+#define DEBUG_STACK 4
+#define MCE_STACK 5
+#define N_EXCEPTION_STACKS 5 /* hw limit is 7 */
+
+/**
+ * Macros for converting between physical address and kernel virtual address.
+ * NOTE: These only work for kernel virtual addresses in the identity map.
+ */
+#ifndef __ASSEMBLY__
+extern unsigned long __phys_addr(unsigned long virt_addr);
+#endif
+#define __pa(x) __phys_addr((unsigned long)(x))
+#define __pa_symbol(x) __phys_addr((unsigned long)(x))
+#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+#define __boot_va(x) __va(x)
+#define __boot_pa(x) __pa(x)
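+/*
+ * Usage sketch (not part of the original header; 'kvaddr' is a
+ * hypothetical identity-mapped kernel virtual address):
+ *
+ *	unsigned long paddr = __pa(kvaddr);	// virtual -> physical
+ *	void *same = __va(paddr);		// physical -> virtual
+ */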
+
+#ifndef __ASSEMBLY__
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pud; } pud_t;
+typedef struct { unsigned long pgd; } pgd_t;
+#define PTE_MASK PHYSICAL_PAGE_MASK
+
+
+extern pud_t level3_kernel_pgt[512];
+extern pud_t level3_physmem_pgt[512];
+extern pud_t level3_ident_pgt[512];
+extern pmd_t level2_kernel_pgt[512];
+extern pgd_t init_level4_pgt[];
+extern pgd_t boot_level4_pgt[];
+
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+extern unsigned long end_pfn;
+
+#endif
+
+#define PTRS_PER_PGD 512
+#define KERNEL_TEXT_SIZE (40*1024*1024)
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pud_val(x) ((x).pud)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pud(x) ((pud_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
+#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
+
+#endif
--- /dev/null
+#ifndef _ARCH_X86_64_PAGE_TABLE_H
+#define _ARCH_X86_64_PAGE_TABLE_H
+
+typedef struct {
+ uint64_t
+ present :1, /* Is there a physical page? */
+ write :1, /* Is the page writable? */
+ user :1, /* Is the page accessible to user-space? */
+ pwt :1, /* Is the page write-through cached? */
+ pcd :1, /* Is the page uncached? */
+ accessed :1, /* Has the page been read? */
+ dirty :1, /* Has the page been written to? */
+ pagesize :1, /* 0 == 4KB, 1 == (2 MB || 1 GB) */
+ global :1, /* Is the page mapped in all address spaces? */
+ os_bits_1 :3, /* Available for us! */
+ base_paddr :40, /* Bits [51,12] of base address. */
+ os_bits_2 :11, /* Available for us! */
+ no_exec :1; /* Is the page non-executable? */
+} xpte_t;
+
+typedef struct {
+ uint64_t
+ present :1, /* Is there a physical page? */
+ write :1, /* Is the page writable? */
+ user :1, /* Is the page accessible to user-space? */
+ pwt :1, /* Is the page write-through cached? */
+ pcd :1, /* Is the page uncached? */
+ accessed :1, /* Has the page been read? */
+ dirty :1, /* Has the page been written to? */
+ pat :1, /* Page attribute table bit. */
+ global :1, /* Is the page mapped in all address spaces? */
+ os_bits_1 :3, /* Available for us! */
+ base_paddr :40, /* Bits [51,12] of page's base physical addr. */
+ os_bits_2 :11, /* Available for us! */
+ no_exec :1; /* Is the page non-executable? */
+} xpte_4KB_t;
+
+typedef struct {
+ uint64_t
+ present :1, /* Is there a physical page? */
+ write :1, /* Is the page writable? */
+ user :1, /* Is the page accessible to user-space? */
+ pwt :1, /* Is the page write-through cached? */
+ pcd :1, /* Is the page uncached? */
+ accessed :1, /* Has the page been read? */
+ dirty :1, /* Has the page been written to? */
+ must_be_1 :1, /* Must be 1 to indicate a 2 MB page. */
+ global :1, /* Is the page mapped in all address spaces? */
+ os_bits_1 :3, /* Available for us! */
+ pat :1, /* Page attribute table bit. */
+ must_be_0 :8, /* Reserved, must be zero. */
+ base_paddr :31, /* Bits [51,21] of page's base physical addr. */
+ os_bits_2 :11, /* Available for us! */
+ no_exec :1; /* Is the page non-executable? */
+} xpte_2MB_t;
+
+typedef struct {
+ uint64_t
+ present :1, /* Is there a physical page? */
+ write :1, /* Is the page writable? */
+ user :1, /* Is the page accessible to user-space? */
+ pwt :1, /* Is the page write-through cached? */
+ pcd :1, /* Is the page uncached? */
+ accessed :1, /* Has the page been read? */
+ dirty :1, /* Has the page been written to? */
+ must_be_1 :1, /* Must be 1 to indicate a 1GB page. */
+ global :1, /* Is the page mapped in all address spaces? */
+ os_bits_1 :3, /* Available for us! */
+ pat :1, /* Page attribute table bit. */
+ must_be_0 :17, /* Reserved, must be zero. */
+ base_paddr :22, /* Bits [51,30] of page's base physical addr. */
+ os_bits_2 :11, /* Available for us! */
+ no_exec :1; /* Is the page non-executable? */
+} xpte_1GB_t;
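+/*
+ * Usage sketch (not part of the original header; pte4/pte2m/pte1g are
+ * hypothetical entries): base_paddr stores the base physical address
+ * shifted right by the page-size-dependent amount, so recover it with:
+ *
+ *	uint64_t p4k = (uint64_t)pte4.base_paddr  << 12;
+ *	uint64_t p2m = (uint64_t)pte2m.base_paddr << 21;
+ *	uint64_t p1g = (uint64_t)pte1g.base_paddr << 30;
+ */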
+
+#endif
--- /dev/null
+#ifndef _ASMx86_64_PARAM_H
+#define _ASMx86_64_PARAM_H
+
+#ifdef __KERNEL__
+# define USER_HZ 100 /* some user interfaces are in "ticks" */
+#define CLOCKS_PER_SEC (USER_HZ) /* like times() */
+#endif
+
+#define EXEC_PAGESIZE 4096
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#endif
--- /dev/null
+#ifndef _X86_64_PDA_H
+#define _X86_64_PDA_H
+
+#ifndef __ASSEMBLY__
+#include <lwk/stddef.h>
+#include <lwk/types.h>
+#include <lwk/cache.h>
+#include <arch/page.h>
+
+/* Per-processor data structure. %gs points to it while the kernel runs */
+struct x8664_pda {
+ struct task_struct *pcurrent; /* Current process */
+ unsigned long data_offset; /* Per cpu data offset from linker address */
+ unsigned long kernelstack; /* top of kernel stack for current */
+ unsigned long oldrsp; /* user rsp for system call */
+#if DEBUG_STKSZ > EXCEPTION_STKSZ
+ unsigned long debugstack; /* #DB/#BP stack. */
+#endif
+ int irqcount; /* Irq nesting counter. Starts with -1 */
+ int cpunumber; /* Logical CPU number */
+ char *irqstackptr; /* top of irqstack */
+ int nodenumber; /* number of current node */
+ unsigned int __softirq_pending;
+ unsigned int __nmi_count; /* number of NMIs on this CPU */
+ int mmu_state;
+ struct aspace *active_aspace;
+ unsigned apic_timer_irqs;
+} ____cacheline_aligned_in_smp;
+
+extern struct x8664_pda *_cpu_pda[];
+extern struct x8664_pda boot_cpu_pda[];
+
+void pda_init(unsigned int cpu, struct task_struct *task);
+
+#define cpu_pda(i) (_cpu_pda[i])
+
+/*
+ * There is no fast way to get the base address of the PDA; all accesses
+ * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
+ */
+#define sizeof_field(type,field) (sizeof(((type *)0)->field))
+#define typeof_field(type,field) typeof(((type *)0)->field)
+
+extern void __bad_pda_field(void);
+
+#define pda_offset(field) offsetof(struct x8664_pda, field)
+
+#define pda_to_op(op,field,val) do { \
+ typedef typeof_field(struct x8664_pda, field) T__; \
+ switch (sizeof_field(struct x8664_pda, field)) { \
+case 2: \
+asm volatile(op "w %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
+case 4: \
+asm volatile(op "l %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
+case 8: \
+asm volatile(op "q %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
+ default: __bad_pda_field(); \
+ } \
+ } while (0)
+
+/*
+ * AK: PDA read accesses should be neither volatile nor have a memory clobber.
+ * Unfortunately removing them causes all hell to break loose currently.
+ */
+#define pda_from_op(op,field) ({ \
+ typeof_field(struct x8664_pda, field) ret__; \
+ switch (sizeof_field(struct x8664_pda, field)) { \
+case 2: \
+asm volatile(op "w %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
+case 4: \
+asm volatile(op "l %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
+case 8: \
+asm volatile(op "q %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
+ default: __bad_pda_field(); \
+ } \
+ ret__; })
+
+
+#define read_pda(field) pda_from_op("mov",field)
+#define write_pda(field,val) pda_to_op("mov",field,val)
+#define add_pda(field,val) pda_to_op("add",field,val)
+#define sub_pda(field,val) pda_to_op("sub",field,val)
+#define or_pda(field,val) pda_to_op("or",field,val)
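+/*
+ * Usage sketch (not part of the original header): each accessor below
+ * expands to a single %gs-relative instruction.
+ *
+ *	int cpu = read_pda(cpunumber);	// this CPU's logical number
+ *	write_pda(mmu_state, 0);
+ *	add_pda(apic_timer_irqs, 1);
+ */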
+
+#endif
+
+#define PDA_STACKOFFSET (5*8)
+
+#endif
--- /dev/null
+#ifndef _X86_64_PERCPU_H
+#define _X86_64_PERCPU_H
+#include <lwk/compiler.h>
+
+/* Same as asm-generic/percpu.h, except that we store the per cpu offset
+ in the PDA. Longer term the PDA and every per cpu variable
+ should be just put into a single section and referenced directly
+ from %gs */
+
+#include <arch/pda.h>
+
+#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
+#define __my_cpu_offset() read_pda(data_offset)
+
+/* Separate out the type, so (int[3], foo) works. */
+#define DEFINE_PER_CPU(type, name) \
+ __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
+
+/* var is in discarded region: offset to particular copy we want */
+#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
+#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
+
+#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
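+/*
+ * Usage sketch (not part of the original header; 'intr_count' is a
+ * hypothetical per-CPU variable):
+ *
+ *	DEFINE_PER_CPU(unsigned long, intr_count);
+ *
+ *	__get_cpu_var(intr_count)++;			// local CPU's copy
+ *	unsigned long n = per_cpu(intr_count, 3);	// CPU 3's copy
+ */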
+
+#endif /* _X86_64_PERCPU_H */
--- /dev/null
+#ifndef _X86_64_PGTABLE_H
+#define _X86_64_PGTABLE_H
+
+#include <lwk/const.h>
+#ifndef __ASSEMBLY__
+
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the x86-64 page table tree.
+ */
+#include <lwk/spinlock.h>
+#include <arch/fixmap.h>
+#include <arch/processor.h>
+#include <arch/bitops.h>
+#include <arch/pda.h>
+
+extern pud_t level3_kernel_pgt[512];
+extern pud_t level3_ident_pgt[512];
+extern pmd_t level2_kernel_pgt[512];
+extern pgd_t init_level4_pgt[];
+extern unsigned long __supported_pte_mask;
+
+#define swapper_pg_dir init_level4_pgt
+
+extern void paging_init(void);
+extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * PGDIR_SHIFT determines what a top-level page table entry can map
+ */
+#define PGDIR_SHIFT 39
+#define PTRS_PER_PGD 512
+
+/*
+ * 3rd level page
+ */
+#define PUD_SHIFT 30
+#define PTRS_PER_PUD 512
+
+/*
+ * PMD_SHIFT determines the size of the area a middle-level
+ * page table can map
+ */
+#define PMD_SHIFT 21
+#define PTRS_PER_PMD 512
+
+/*
+ * entries per page directory level
+ */
+#define PTRS_PER_PTE 512
+
+#ifndef __ASSEMBLY__
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
+#define pud_ERROR(e) \
+ printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
+
+#define pgd_none(x) (!pgd_val(x))
+#define pud_none(x) (!pud_val(x))
+
+static inline void set_pte(pte_t *dst, pte_t val)
+{
+ pte_val(*dst) = pte_val(val);
+}
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+static inline void set_pmd(pmd_t *dst, pmd_t val)
+{
+ pmd_val(*dst) = pmd_val(val);
+}
+
+static inline void set_pud(pud_t *dst, pud_t val)
+{
+ pud_val(*dst) = pud_val(val);
+}
+
+static inline void pud_clear (pud_t *pud)
+{
+ set_pud(pud, __pud(0));
+}
+
+static inline void set_pgd(pgd_t *dst, pgd_t val)
+{
+ pgd_val(*dst) = pgd_val(val);
+}
+
+static inline void pgd_clear (pgd_t * pgd)
+{
+ set_pgd(pgd, __pgd(0));
+}
+
+#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte, 0))
+
+struct mm_struct;
+
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
+{
+ pte_t pte;
+ if (full) {
+ pte = *ptep;
+ *ptep = __pte(0);
+ } else {
+ pte = ptep_get_and_clear(mm, addr, ptep);
+ }
+ return pte;
+}
+
+#define pte_same(a, b) ((a).pte == (b).pte)
+
+#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
+
+#endif /* !__ASSEMBLY__ */
+
+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
+#define FIRST_USER_ADDRESS 0
+
+#define MAXMEM _AC(0x3fffffffffff, UL)
+#define VMALLOC_START _AC(0xffffc20000000000, UL)
+#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
+#define MODULES_VADDR _AC(0xffffffff88000000, UL)
+#define MODULES_END _AC(0xfffffffffff00000, UL)
+#define MODULES_LEN (MODULES_END - MODULES_VADDR)
+
+#define _PAGE_BIT_PRESENT 0
+#define _PAGE_BIT_RW 1
+#define _PAGE_BIT_USER 2
+#define _PAGE_BIT_PWT 3
+#define _PAGE_BIT_PCD 4
+#define _PAGE_BIT_ACCESSED 5
+#define _PAGE_BIT_DIRTY 6
+#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
+#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
+
+#define _PAGE_PRESENT 0x001
+#define _PAGE_RW 0x002
+#define _PAGE_USER 0x004
+#define _PAGE_PWT 0x008
+#define _PAGE_PCD 0x010
+#define _PAGE_ACCESSED 0x020
+#define _PAGE_DIRTY 0x040
+#define _PAGE_PSE 0x080 /* 2MB page */
+#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */
+#define _PAGE_GLOBAL 0x100 /* Global TLB entry */
+
+#define _PAGE_PROTNONE 0x080 /* If not present */
+#define _PAGE_NX (_AC(1,UL)<<_PAGE_BIT_NX)
+
+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_COPY PAGE_COPY_NOEXEC
+#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define __PAGE_KERNEL \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
+#define __PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define __PAGE_KERNEL_NOCACHE \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX)
+#define __PAGE_KERNEL_RO \
+ (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
+#define __PAGE_KERNEL_VSYSCALL \
+ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
+ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
+#define __PAGE_KERNEL_LARGE \
+ (__PAGE_KERNEL | _PAGE_PSE)
+#define __PAGE_KERNEL_LARGE_EXEC \
+ (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+
+#define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)
+
+#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
+#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
+#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
+#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
+#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
+
+/* xwr */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_EXEC
+#define __P101 PAGE_READONLY_EXEC
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_EXEC
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_EXEC
+#define __S101 PAGE_READONLY_EXEC
+#define __S110 PAGE_SHARED_EXEC
+#define __S111 PAGE_SHARED_EXEC
+
+#ifndef __ASSEMBLY__
+
+static inline unsigned long pgd_bad(pgd_t pgd)
+{
+ return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+}
+
+static inline unsigned long pud_bad(pud_t pud)
+{
+ return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+}
+
+static inline unsigned long pmd_bad(pmd_t pmd)
+{
+ return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+}
+
+#define pte_none(x) (!pte_val(x))
+#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this right? */
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
+
+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
+{
+ pte_t pte;
+ pte_val(pte) = (page_nr << PAGE_SHIFT);
+ pte_val(pte) |= pgprot_val(pgprot);
+ pte_val(pte) &= __supported_pte_mask;
+ return pte;
+}
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
+static inline int pte_user(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
+static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
+static inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_NX); }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; }
+
+static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
+static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
+static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
+static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
+static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
+static inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
+static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_NX)); return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; }
+static inline pte_t pte_clrhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_PSE)); return pte; }
+
+struct vm_area_struct;
+
+static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+ if (!pte_dirty(*ptep))
+ return 0;
+ return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
+}
+
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+ if (!pte_young(*ptep))
+ return 0;
+ return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
+}
+
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ clear_bit(_PAGE_BIT_RW, &ptep->pte);
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
+
+static inline int pmd_large(pmd_t pte) {
+ return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
+}
+
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+/*
+ * Level 4 access.
+ */
+#define pgd_page_vaddr(pgd) ((unsigned long) __va((unsigned long)pgd_val(pgd) & PTE_MASK))
+#define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT))
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
+#define pgd_offset_k(address) (init_level4_pgt + pgd_index(address))
+#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
+#define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE })
+
+/* PUD - Level3 access */
+/* to find an entry in a page-table-directory. */
+#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
+#define pud_page(pud) (pfn_to_page(pud_val(pud) >> PAGE_SHIFT))
+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+#define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address))
+#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
+
+/* PMD - Level 2 access */
+#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+
+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+#define pmd_offset(dir, address) ((pmd_t *) pud_page_vaddr(*(dir)) + \
+ pmd_index(address))
+#define pmd_none(x) (!pmd_val(x))
+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
+#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
+#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
+
+#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
+#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
+#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
+
+/* PTE - Level 1 access. */
+
+/* page, protection -> pte */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+#define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
+
+/* Change flags of a PTE */
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte_val(pte) &= _PAGE_CHG_MASK;
+ pte_val(pte) |= pgprot_val(newprot);
+ pte_val(pte) &= __supported_pte_mask;
+ return pte;
+}
+
+#define pte_index(address) \
+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
+ pte_index(address))
+
+/* x86-64 always has all page tables mapped. */
+#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
+#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
+#define pte_unmap(pte) /* NOP */
+#define pte_unmap_nested(pte) /* NOP */
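+/*
+ * Usage sketch (not part of the original header; 'vaddr' is a
+ * hypothetical kernel virtual address): walk the kernel page tables
+ * down to the pte that maps it.
+ *
+ *	pgd_t *pgd = pgd_offset_k(vaddr);
+ *	if (pgd_present(*pgd)) {
+ *		pud_t *pud = pud_offset(pgd, vaddr);
+ *		if (pud_present(*pud)) {
+ *			pmd_t *pmd = pmd_offset(pud, vaddr);
+ *			if (pmd_present(*pmd) && !pmd_large(*pmd)) {
+ *				pte_t *pte = pte_offset_kernel(pmd, vaddr);
+ *				// *pte maps vaddr if pte_present(*pte)
+ *			}
+ *		}
+ *	}
+ */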
+
+#define update_mmu_cache(vma,address,pte) do { } while (0)
+
+/* We only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPUs that might be updating the dirty
+ * bit at the same time. */
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed && __dirty) { \
+ set_pte(__ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+ } \
+ __changed; \
+})
+
+/* Encode and de-code a swap entry */
+#define __swp_type(x) (((x).val >> 1) & 0x3f)
+#define __swp_offset(x) ((x).val >> 8)
+#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+extern spinlock_t pgd_lock;
+extern struct list_head pgd_list;
+
+extern int kern_addr_valid(unsigned long addr);
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define HAVE_ARCH_UNMAPPED_AREA
+
+#define pgtable_cache_init() do { } while (0)
+#define check_pgt_cache() do { } while (0)
+
+#define PAGE_AGP PAGE_KERNEL_NOCACHE
+#define HAVE_PAGE_AGP 1
+
+/* fs/proc/kcore.c */
+#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
+#define kc_offset_to_vaddr(o) \
+ (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTE_SAME
+#include <arch-generic/pgtable.h>
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _X86_64_PGTABLE_H */
--- /dev/null
+#ifndef _X86_64_POSIX_TYPES_H
+#define _X86_64_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned long __kernel_ino_t;
+typedef unsigned int __kernel_mode_t;
+typedef unsigned long __kernel_nlink_t;
+typedef long __kernel_off_t;
+typedef int __kernel_pid_t;
+typedef int __kernel_ipc_pid_t;
+typedef unsigned int __kernel_uid_t;
+typedef unsigned int __kernel_gid_t;
+typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
+typedef long __kernel_ptrdiff_t;
+typedef unsigned long __kernel_uintptr_t;
+typedef long __kernel_time_t;
+typedef long __kernel_suseconds_t;
+typedef long __kernel_clock_t;
+typedef int __kernel_timer_t;
+typedef int __kernel_clockid_t;
+typedef int __kernel_daddr_t;
+typedef char * __kernel_caddr_t;
+typedef long long __kernel_loff_t;
+
+typedef struct {
+ int val[2];
+} __kernel_fsid_t;
+
+#ifdef __KERNEL__
+
+#undef __FD_SET
+static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
+}
+
+#undef __FD_CLR
+static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
+}
+
+#undef __FD_ISSET
+static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
+}
+
+/*
+ * This will unroll the loop for the normal constant cases (8 or 32 longs,
+ * for 256 and 1024-bit fd_sets respectively)
+ */
+#undef __FD_ZERO
+static __inline__ void __FD_ZERO(__kernel_fd_set *p)
+{
+ unsigned long *tmp = p->fds_bits;
+ int i;
+
+ if (__builtin_constant_p(__FDSET_LONGS)) {
+ switch (__FDSET_LONGS) {
+ case 32:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+ tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
+ tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
+ tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
+ tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
+ tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
+ tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
+ return;
+ case 16:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+ tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
+ tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
+ return;
+ case 8:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+ return;
+ case 4:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ return;
+ }
+ }
+ i = __FDSET_LONGS;
+ while (i) {
+ i--;
+ *tmp = 0;
+ tmp++;
+ }
+}
+
+#endif /* defined(__KERNEL__) */
+
+#endif
--- /dev/null
+#ifndef X86_64_PRCTL_H
+#define X86_64_PRCTL_H 1
+
+#define ARCH_SET_GS 0x1001
+#define ARCH_SET_FS 0x1002
+#define ARCH_GET_FS 0x1003
+#define ARCH_GET_GS 0x1004
+
+
+#endif
--- /dev/null
+/*
+ * include/asm-x86_64/processor.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+#ifndef _X86_64_PROCESSOR_H
+#define _X86_64_PROCESSOR_H
+
+#include <arch/segment.h>
+#include <arch/page.h>
+#include <arch/types.h>
+#include <arch/sigcontext.h>
+#include <arch/cpufeature.h>
+/* #include <linux/threads.h> */
+#include <arch/msr.h>
+#include <arch/current.h>
+#include <arch/system.h>
+/* #include <arch/mmsegment.h> */
+#include <arch/percpu.h>
+/* #include <lwk/personality.h> */
+#include <lwk/cpumask.h>
+#include <lwk/cache.h>
+
+#define TF_MASK 0x00000100
+#define IF_MASK 0x00000200
+#define IOPL_MASK 0x00003000
+#define NT_MASK 0x00004000
+#define VM_MASK 0x00020000
+#define AC_MASK 0x00040000
+#define VIF_MASK 0x00080000 /* virtual interrupt flag */
+#define VIP_MASK 0x00100000 /* virtual interrupt pending */
+#define ID_MASK 0x00200000
+
+#define desc_empty(desc) \
+ (!((desc)->a | (desc)->b))
+
+#define desc_equal(desc1, desc2) \
+ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
+
+#define X86_VENDOR_INTEL 0
+#define X86_VENDOR_CYRIX 1
+#define X86_VENDOR_AMD 2
+#define X86_VENDOR_UMC 3
+#define X86_VENDOR_NEXGEN 4
+#define X86_VENDOR_CENTAUR 5
+#define X86_VENDOR_RISE 6
+#define X86_VENDOR_TRANSMETA 7
+#define X86_VENDOR_NUM 8
+#define X86_VENDOR_UNKNOWN 0xff
+
+extern char ignore_irq13;
+
+extern void identify_cpu(void);
+
+/*
+ * EFLAGS bits
+ */
+#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
+#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
+#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
+#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
+#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
+#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
+#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
+#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
+#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
+#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
+#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
+#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
+#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
+#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
+#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
+#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
+#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
+
+/*
+ * Intel CPU features in CR4
+ */
+#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
+#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
+#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
+#define X86_CR4_DE 0x0008 /* enable debugging extensions */
+#define X86_CR4_PSE 0x0010 /* enable page size extensions */
+#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
+#define X86_CR4_MCE 0x0040 /* Machine check enable */
+#define X86_CR4_PGE 0x0080 /* enable global pages */
+#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
+#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
+#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
+
+/*
+ * Save the cr4 feature set we're using (i.e.
+ * Pentium 4MB enable and PPro Global page
+ * enable), so that any CPUs that boot up
+ * after us can get the correct flags.
+ */
+extern unsigned long mmu_cr4_features;
+
+static inline void set_in_cr4 (unsigned long mask)
+{
+ mmu_cr4_features |= mask;
+ __asm__("movq %%cr4,%%rax\n\t"
+ "orq %0,%%rax\n\t"
+ "movq %%rax,%%cr4\n"
+ : : "irg" (mask)
+ :"ax");
+}
+
+static inline void clear_in_cr4 (unsigned long mask)
+{
+ mmu_cr4_features &= ~mask;
+ __asm__("movq %%cr4,%%rax\n\t"
+ "andq %0,%%rax\n\t"
+ "movq %%rax,%%cr4\n"
+ : : "irg" (~mask)
+ :"ax");
+}
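+
+/*
+ * Illustrative sketch (not compiled): a CPU-init path would typically use
+ * these helpers to enable CR4 features while keeping mmu_cr4_features in
+ * sync, so that secondary CPUs can copy the same configuration when they
+ * boot.
+ */
+#if 0
+	/* Enable fast FXSAVE/FXRSTOR and unmasked SSE exceptions. */
+	set_in_cr4(X86_CR4_OSFXSR);
+	set_in_cr4(X86_CR4_OSXMMEXCPT);
+
+	/* A secondary CPU can then simply load the accumulated feature set. */
+	write_cr4(mmu_cr4_features);
+#endif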
+
+
+/*
+ * Size of io_bitmap.
+ */
+#define IO_BITMAP_BITS 65536
+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
+#define INVALID_IO_BITMAP_OFFSET 0x8000
+
+struct i387_fxsave_struct {
+ u16 cwd;
+ u16 swd;
+ u16 twd;
+ u16 fop;
+ u64 rip;
+ u64 rdp;
+ u32 mxcsr;
+ u32 mxcsr_mask;
+ u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
+ u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
+ u32 padding[24];
+} __attribute__ ((aligned (16)));
+
+union i387_union {
+ struct i387_fxsave_struct fxsave;
+};
+
+struct tss_struct {
+ u32 reserved1;
+ u64 rsp0;
+ u64 rsp1;
+ u64 rsp2;
+ u64 reserved2;
+ u64 ist[7];
+ u32 reserved3;
+ u32 reserved4;
+ u16 reserved5;
+ u16 io_bitmap_base;
+ /*
+ * The extra 1 is there because the CPU will access an
+ * additional byte beyond the end of the IO permission
+ * bitmap. The extra byte must be all 1 bits, and must
+ * be within the limit. Thus we have:
+ *
+ * 128 bytes, the bitmap itself, for ports 0..0x3ff
+ * 8 bytes, for an extra "long" of ~0UL
+ */
+ unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
+} __attribute__((packed)) ____cacheline_aligned;
+
+DECLARE_PER_CPU(struct tss_struct,tss);
+
+#ifdef CONFIG_X86_VSMP
+#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
+#define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
+#else
+#define ARCH_MIN_TASKALIGN 16
+#define ARCH_MIN_MMSTRUCT_ALIGN 0
+#endif
+
+struct thread_struct {
+ unsigned long rsp0;
+ unsigned long rsp;
+ unsigned long userrsp; /* Copy from PDA */
+ unsigned long fs;
+ unsigned long gs;
+ unsigned short es, ds, fsindex, gsindex;
+/* Hardware debugging registers */
+ unsigned long debugreg0;
+ unsigned long debugreg1;
+ unsigned long debugreg2;
+ unsigned long debugreg3;
+ unsigned long debugreg6;
+ unsigned long debugreg7;
+/* fault info */
+ unsigned long cr2, trap_no, error_code;
+/* floating point info */
+ union i387_union i387 __attribute__((aligned(16)));
+/* IO permissions. the bitmap could be moved into the GDT, that would make
+ switch faster for a limited number of ioperm using tasks. -AK */
+ int ioperm;
+ unsigned long *io_bitmap_ptr;
+ unsigned io_bitmap_max;
+/* cached TLS descriptors. */
+ u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
+} __attribute__((aligned(16)));
+
+#define BOOTSTRAP_THREAD { \
+ .rsp0 = (unsigned long)&bootstrap_stack + sizeof(bootstrap_stack) \
+}
+
+#define BOOTSTRAP_TSS { \
+ .rsp0 = (unsigned long)&bootstrap_stack + sizeof(bootstrap_stack) \
+}
+
+#define INIT_MMAP \
+{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
+
+#define start_thread(regs,new_rip,new_rsp) do { \
+ asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
+ load_gs_index(0); \
+ (regs)->rip = (new_rip); \
+ (regs)->rsp = (new_rsp); \
+ write_pda(oldrsp, (new_rsp)); \
+ (regs)->cs = __USER_CS; \
+ (regs)->ss = __USER_DS; \
+ (regs)->eflags = 0x200; \
+ set_fs(USER_DS); \
+} while(0)
+
+#define get_debugreg(var, register) \
+ __asm__("movq %%db" #register ", %0" \
+ :"=r" (var))
+#define set_debugreg(value, register) \
+ __asm__("movq %0,%%db" #register \
+ : /* no output */ \
+ :"r" (value))
+
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+
+/* Prepare to copy thread state - unlazy all lazy status */
+extern void prepare_to_copy(struct task_struct *tsk);
+
+/*
+ * create a kernel thread without removing it from tasklists
+ */
+extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+/*
+ * Return saved PC of a blocked thread.
+ * What is this good for? It will always be the scheduler or ret_from_fork.
+ */
+#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
+
+extern unsigned long get_wchan(struct task_struct *p);
+#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
+#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
+
+
+struct microcode_header {
+ unsigned int hdrver;
+ unsigned int rev;
+ unsigned int date;
+ unsigned int sig;
+ unsigned int cksum;
+ unsigned int ldrver;
+ unsigned int pf;
+ unsigned int datasize;
+ unsigned int totalsize;
+ unsigned int reserved[3];
+};
+
+struct microcode {
+ struct microcode_header hdr;
+ unsigned int bits[0];
+};
+
+typedef struct microcode microcode_t;
+typedef struct microcode_header microcode_header_t;
+
+/* microcode format is extended from prescott processors */
+struct extended_signature {
+ unsigned int sig;
+ unsigned int pf;
+ unsigned int cksum;
+};
+
+struct extended_sigtable {
+ unsigned int count;
+ unsigned int cksum;
+ unsigned int reserved[3];
+ struct extended_signature sigs[0];
+};
+
+
+#define ASM_NOP1 K8_NOP1
+#define ASM_NOP2 K8_NOP2
+#define ASM_NOP3 K8_NOP3
+#define ASM_NOP4 K8_NOP4
+#define ASM_NOP5 K8_NOP5
+#define ASM_NOP6 K8_NOP6
+#define ASM_NOP7 K8_NOP7
+#define ASM_NOP8 K8_NOP8
+
+/* Opteron nops */
+#define K8_NOP1 ".byte 0x90\n"
+#define K8_NOP2 ".byte 0x66,0x90\n"
+#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
+#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
+#define K8_NOP5 K8_NOP3 K8_NOP2
+#define K8_NOP6 K8_NOP3 K8_NOP3
+#define K8_NOP7 K8_NOP4 K8_NOP3
+#define K8_NOP8 K8_NOP4 K8_NOP4
+
+#define ASM_NOP_MAX 8
+
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static inline void rep_nop(void)
+{
+ __asm__ __volatile__("rep;nop": : :"memory");
+}
+
+/* Stop speculative execution */
+static inline void sync_core(void)
+{
+ int tmp;
+ asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
+}
+
+#define cpu_has_fpu 1
+
+#define ARCH_HAS_PREFETCH
+static inline void prefetch(void *x)
+{
+ asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
+}
+
+#define ARCH_HAS_PREFETCHW 1
+static inline void prefetchw(void *x)
+{
+ asm volatile("prefetchtw %0" :: "m" (*(unsigned long *)x));
+}
+
+#define ARCH_HAS_SPINLOCK_PREFETCH 1
+
+#define spin_lock_prefetch(x) prefetchw(x)
+
+#define cpu_relax() rep_nop()
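+
+/*
+ * Illustrative sketch (not compiled): cpu_relax() belongs in the body of a
+ * busy-wait loop. The PAUSE instruction hints to the CPU that this is a spin
+ * loop, reducing power use and memory-order mis-speculation penalties when
+ * the awaited value finally changes. flag_addr is hypothetical.
+ */
+#if 0
+	while (!*(volatile int *)flag_addr)
+		cpu_relax();
+#endif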
+
+static inline void serialize_cpu(void)
+{
+ __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
+}
+
+static inline void __monitor(const void *eax, unsigned long ecx,
+ unsigned long edx)
+{
+ /* "monitor %eax,%ecx,%edx;" */
+ asm volatile(
+ ".byte 0x0f,0x01,0xc8;"
+ : :"a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+ /* "mwait %eax,%ecx;" */
+ asm volatile(
+ ".byte 0x0f,0x01,0xc9;"
+ : :"a" (eax), "c" (ecx));
+}
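+
+/*
+ * Illustrative sketch (not compiled): MONITOR/MWAIT are typically paired in
+ * an idle loop. MONITOR arms address-range monitoring on the cache line
+ * holding the flag, and MWAIT enters an implementation-defined low-power
+ * state until that line is written or an interrupt arrives.
+ * need_resched_flag is a hypothetical per-CPU variable.
+ */
+#if 0
+	while (!need_resched_flag) {
+		__monitor(&need_resched_flag, 0, 0);
+		if (!need_resched_flag)
+			__mwait(0, 0);
+	}
+#endif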
+
+#define stack_current() \
+({ \
+ struct thread_info *ti; \
+ asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
+ ti->task; \
+})
+
+#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
+
+extern unsigned long boot_option_idle_override;
+/* Boot loader type from the setup header */
+extern int bootloader_type;
+
+#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
+
+#endif /* _X86_64_PROCESSOR_H */
--- /dev/null
+#ifndef _X86_64_PROTO_H
+#define _X86_64_PROTO_H
+
+/* misc architecture specific prototypes */
+
+extern void early_idt_handler(void);
+
+extern char boot_exception_stacks[];
+
+extern unsigned long table_start, table_end;
+
+void init_kernel_pgtables(unsigned long start, unsigned long end);
+
+extern unsigned long end_pfn_map;
+
+extern void init_resources(void);
+
+extern unsigned long ebda_addr, ebda_size;
+
+extern int unhandled_signal(struct task_struct *tsk, int sig);
+
+extern void asm_syscall(void);
+extern void asm_syscall_ignore(void);
+
+extern unsigned long __phys_addr(unsigned long virt_addr);
+
+void __init interrupts_init(void);
+
+extern paddr_t initrd_start, initrd_end;
+
+#endif
--- /dev/null
+#ifndef _X86_64_PTRACE_H
+#define _X86_64_PTRACE_H
+
+#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
+#define R15 0
+#define R14 8
+#define R13 16
+#define R12 24
+#define RBP 32
+#define RBX 40
+/* arguments: interrupts/non tracing syscalls only save up to here */
+#define R11 48
+#define R10 56
+#define R9 64
+#define R8 72
+#define RAX 80
+#define RCX 88
+#define RDX 96
+#define RSI 104
+#define RDI 112
+#define ORIG_RAX 120 /* = ERROR_CODE */
+#define ERROR_CODE 120
+/* end of arguments */
+/* cpu exception frame or undefined in case of fast syscall. */
+#define RIP 128
+#define CS 136
+#define EFLAGS 144
+#define RSP 152
+#define SS 160
+#define ARGOFFSET R11
+#endif /* __ASSEMBLY__ || __FRAME_OFFSETS */
+
+/* top of stack page */
+#define FRAME_SIZE 168
+
+#define PTRACE_OLDSETOPTIONS 21
+
+#ifndef __ASSEMBLY__
+
+struct pt_regs {
+ unsigned long r15;
+ unsigned long r14;
+ unsigned long r13;
+ unsigned long r12;
+ unsigned long rbp;
+ unsigned long rbx;
+/* arguments: non interrupts/non tracing syscalls only save up to here */
+ unsigned long r11;
+ unsigned long r10;
+ unsigned long r9;
+ unsigned long r8;
+ unsigned long rax;
+ unsigned long rcx;
+ unsigned long rdx;
+ unsigned long rsi;
+ unsigned long rdi;
+ unsigned long orig_rax;
+/* end of arguments */
+/* cpu exception frame or undefined */
+ unsigned long rip;
+ unsigned long cs;
+ unsigned long eflags;
+ unsigned long rsp;
+ unsigned long ss;
+/* top of stack page */
+};
+
+#endif
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+#define PTRACE_GETFPREGS 14
+#define PTRACE_SETFPREGS 15
+#define PTRACE_GETFPXREGS 18
+#define PTRACE_SETFPXREGS 19
+
+/* only useful for accessing 32-bit programs */
+#define PTRACE_GET_THREAD_AREA 25
+#define PTRACE_SET_THREAD_AREA 26
+
+#define PTRACE_ARCH_PRCTL 30 /* arch_prctl for child */
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+#define user_mode(regs) (!!((regs)->cs & 3))
+#define user_mode_vm(regs) user_mode(regs)
+#define instruction_pointer(regs) ((regs)->rip)
+extern unsigned long profile_pc(struct pt_regs *regs);
+void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
+
+struct task_struct;
+
+extern unsigned long
+convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs);
+
+enum {
+ EF_CF = 0x00000001,
+ EF_PF = 0x00000004,
+ EF_AF = 0x00000010,
+ EF_ZF = 0x00000040,
+ EF_SF = 0x00000080,
+ EF_TF = 0x00000100,
+ EF_IE = 0x00000200,
+ EF_DF = 0x00000400,
+ EF_OF = 0x00000800,
+ EF_IOPL = 0x00003000,
+ EF_IOPL_RING0 = 0x00000000,
+ EF_IOPL_RING1 = 0x00001000,
+ EF_IOPL_RING2 = 0x00002000,
+ EF_NT = 0x00004000, /* nested task */
+ EF_RF = 0x00010000, /* resume */
+ EF_VM = 0x00020000, /* virtual mode */
+ EF_AC = 0x00040000, /* alignment */
+ EF_VIF = 0x00080000, /* virtual interrupt */
+ EF_VIP = 0x00100000, /* virtual interrupt pending */
+ EF_ID = 0x00200000, /* id */
+};
+
+#endif
+
+#endif
--- /dev/null
+/* include/arch-x86_64/rwlock.h
+ *
+ * Helpers used by both rw spinlocks and rw semaphores.
+ *
+ * Based in part on code from semaphore.h and
+ * spinlock.h Copyright 1996 Linus Torvalds.
+ *
+ * Copyright 1999 Red Hat, Inc.
+ * Copyright 2001,2002 SuSE labs
+ *
+ * Written by Benjamin LaHaise.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ARCH_X86_64_RWLOCK_H
+#define _ARCH_X86_64_RWLOCK_H
+
+#include <lwk/stringify.h>
+
+#define RW_LOCK_BIAS 0x01000000
+#define RW_LOCK_BIAS_STR "0x01000000"
+
+#define __build_read_lock_ptr(rw, helper) \
+ asm volatile("lock ; subl $1,(%0)\n\t" \
+ "js 2f\n" \
+ "1:\n" \
+ LOCK_SECTION_START("") \
+ "2:\tcall " helper "\n\t" \
+ "jmp 1b\n" \
+ LOCK_SECTION_END \
+ ::"a" (rw) : "memory")
+
+#define __build_read_lock_const(rw, helper) \
+ asm volatile("lock ; subl $1,%0\n\t" \
+ "js 2f\n" \
+ "1:\n" \
+ LOCK_SECTION_START("") \
+ "2:\tpushq %%rax\n\t" \
+ "leaq %0,%%rax\n\t" \
+ "call " helper "\n\t" \
+ "popq %%rax\n\t" \
+ "jmp 1b\n" \
+ LOCK_SECTION_END \
+ :"=m" (*((volatile int *)rw))::"memory")
+
+#define __build_read_lock(rw, helper) do { \
+ if (__builtin_constant_p(rw)) \
+ __build_read_lock_const(rw, helper); \
+ else \
+ __build_read_lock_ptr(rw, helper); \
+ } while (0)
+
+#define __build_write_lock_ptr(rw, helper) \
+ asm volatile("lock ; subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
+ "jnz 2f\n" \
+ "1:\n" \
+ LOCK_SECTION_START("") \
+ "2:\tcall " helper "\n\t" \
+ "jmp 1b\n" \
+ LOCK_SECTION_END \
+ ::"a" (rw) : "memory")
+
+#define __build_write_lock_const(rw, helper) \
+ asm volatile("lock ; subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
+ "jnz 2f\n" \
+ "1:\n" \
+ LOCK_SECTION_START("") \
+ "2:\tpushq %%rax\n\t" \
+ "leaq %0,%%rax\n\t" \
+ "call " helper "\n\t" \
+ "popq %%rax\n\t" \
+ "jmp 1b\n" \
+ LOCK_SECTION_END \
+ :"=m" (*((volatile long *)rw))::"memory")
+
+#define __build_write_lock(rw, helper) do { \
+ if (__builtin_constant_p(rw)) \
+ __build_write_lock_const(rw, helper); \
+ else \
+ __build_write_lock_ptr(rw, helper); \
+ } while (0)
+
+#endif
--- /dev/null
+#ifndef _X86_64_SECTIONS_H
+#define _X86_64_SECTIONS_H
+
+/* nothing to see, move along */
+#include <arch-generic/sections.h>
+
+#endif
--- /dev/null
+#ifndef _ARCH_SEGMENT_H
+#define _ARCH_SEGMENT_H
+
+#include <arch/cache.h>
+
+#define __KERNEL_CS 0x10
+#define __KERNEL_DS 0x18
+
+#define __KERNEL32_CS 0x08
+
+/*
+ * we cannot use the same code segment descriptor for user and kernel
+ * -- not even in the long flat mode, because of different DPL /kkeil
+ * The segment offset needs to contain a RPL. Grr. -AK
+ * GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets)
+ */
+
+#define __USER32_CS 0x23 /* 4*8+3 */
+#define __USER_DS 0x2b /* 5*8+3 */
+#define __USER_CS 0x33 /* 6*8+3 */
+#define __USER32_DS __USER_DS
+
+#define GDT_ENTRY_TSS 8 /* needs two entries */
+#define GDT_ENTRY_LDT 10 /* needs two entries */
+#define GDT_ENTRY_TLS_MIN 12
+#define GDT_ENTRY_TLS_MAX 14
+
+#define GDT_ENTRY_TLS_ENTRIES 3
+
+#define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
+#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
+
+/* TLS indexes for 64bit - hardcoded in arch_prctl */
+#define FS_TLS 0
+#define GS_TLS 1
+
+#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
+#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
+
+#define IDT_ENTRIES 256
+#define GDT_ENTRIES 16
+#define GDT_SIZE (GDT_ENTRIES * 8)
+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
+
+#endif
--- /dev/null
+#ifndef _ASM_X86_64_SHOW_H
+#define _ASM_X86_64_SHOW_H
+
+#include <lwk/ptrace.h>
+
+extern void printk_address(unsigned long address);
+extern void show_registers(struct pt_regs *regs);
+
+#endif
--- /dev/null
+#ifndef _X86_64_SIGCONTEXT_H
+#define _X86_64_SIGCONTEXT_H
+
+#include <arch/types.h>
+#include <lwk/compiler.h>
+
+/* FXSAVE frame */
+/* Note: reserved1/2 may someday contain valuable data. Always save/restore
+ them when you change signal frames. */
+struct _fpstate {
+ __u16 cwd;
+ __u16 swd;
+ __u16 twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */
+ __u16 fop;
+ __u64 rip;
+ __u64 rdp;
+ __u32 mxcsr;
+ __u32 mxcsr_mask;
+ __u32 st_space[32]; /* 8*16 bytes for each FP-reg */
+ __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
+ __u32 reserved2[24];
+};
+
+struct sigcontext {
+ unsigned long r8;
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+ unsigned long r12;
+ unsigned long r13;
+ unsigned long r14;
+ unsigned long r15;
+ unsigned long rdi;
+ unsigned long rsi;
+ unsigned long rbp;
+ unsigned long rbx;
+ unsigned long rdx;
+ unsigned long rax;
+ unsigned long rcx;
+ unsigned long rsp;
+ unsigned long rip;
+ unsigned long eflags; /* RFLAGS */
+ unsigned short cs;
+ unsigned short gs;
+ unsigned short fs;
+ unsigned short __pad0;
+ unsigned long err;
+ unsigned long trapno;
+ unsigned long oldmask;
+ unsigned long cr2;
+ struct _fpstate __user *fpstate; /* zero when no FPU context */
+ unsigned long reserved1[8];
+};
+
+#endif
--- /dev/null
+#ifndef _X8664_SIGINFO_H
+#define _X8664_SIGINFO_H
+
+#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
+
+#include <arch-generic/siginfo.h>
+
+#endif
--- /dev/null
+#ifndef _ASM_X86_64_SIGNAL_H
+#define _ASM_X86_64_SIGNAL_H
+
+#ifndef __ASSEMBLY__
+#include <lwk/types.h>
+#include <lwk/time.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+
+#ifdef __KERNEL__
+#include <lwk/linkage.h>
+/* Most things should be clean enough to redefine this at will, if care
+ is taken to make libc match. */
+
+#define _NSIG 64
+#define _NSIG_BPW 64
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+
+#else
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+#define NSIG 32
+typedef unsigned long sigset_t;
+
+#endif /* __KERNEL__ */
+#endif
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGSYS 31
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX _NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP 0x00000001
+#define SA_NOCLDWAIT 0x00000002
+#define SA_SIGINFO 0x00000004
+#define SA_ONSTACK 0x08000000
+#define SA_RESTART 0x10000000
+#define SA_NODEFER 0x40000000
+#define SA_RESETHAND 0x80000000
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+
+#define SA_RESTORER 0x04000000
+
+/*
+ * sigaltstack controls
+ */
+#define SS_ONSTACK 1
+#define SS_DISABLE 2
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+
+#include <arch-generic/signal.h>
+
+#ifndef __ASSEMBLY__
+
+struct sigaction {
+ __sighandler_t sa_handler;
+ unsigned long sa_flags;
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+};
+
+struct k_sigaction {
+ struct sigaction sa;
+};
+
+typedef struct sigaltstack {
+ void __user *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+#ifdef __KERNEL__
+#include <arch/sigcontext.h>
+
+#undef __HAVE_ARCH_SIG_BITOPS
+#endif
+
+#define ptrace_signal_deliver(regs, cookie) do { } while (0)
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
--- /dev/null
+#ifndef _ASM_SMP_H
+#define _ASM_SMP_H
+
+/*
+ * We need the APIC definitions automatically as part of 'smp.h'
+ */
+#ifndef __ASSEMBLY__
+/* #include <linux/threads.h> */
+#include <lwk/cpumask.h>
+#include <lwk/bitops.h>
+extern int disable_apic;
+#endif
+
+#ifndef __ASSEMBLY__
+#include <arch/fixmap.h>
+//#include <asm/mpspec.h>
+//#include <asm/io_apic.h>
+#include <arch/apic.h>
+//#include <asm/thread_info.h>
+#include <arch/pda.h>
+
+struct pt_regs;
+
+extern cpumask_t cpu_present_mask;
+extern cpumask_t cpu_possible_map;
+extern cpumask_t cpu_online_map;
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_initialized;
+
+/*
+ * Private routines/data
+ */
+
+extern void smp_alloc_memory(void);
+extern volatile unsigned long smp_invalidate_needed;
+extern int pic_mode;
+extern void lock_ipi_call_lock(void);
+extern void unlock_ipi_call_lock(void);
+extern int smp_num_siblings;
+extern void smp_send_reschedule(int cpu);
+void smp_stop_cpu(void);
+extern int smp_call_function_single(int cpuid, void (*func) (void *info),
+ void *info, int retry, int wait);
+
+extern cpumask_t cpu_sibling_map[NR_CPUS];
+extern cpumask_t cpu_core_map[NR_CPUS];
+extern uint16_t phys_proc_id[NR_CPUS];
+extern uint16_t cpu_core_id[NR_CPUS];
+extern uint16_t cpu_llc_id[NR_CPUS];
+
+#define SMP_TRAMPOLINE_BASE 0x6000
+
+/*
+ * On x86 all CPUs are mapped 1:1 to the APIC space.
+ * This simplifies scheduling and IPI sending and
+ * compresses data structures.
+ */
+
+static inline int num_booting_cpus(void)
+{
+ return cpus_weight(cpu_callout_map);
+}
+
+#define raw_smp_processor_id() read_pda(cpunumber)
+
+static inline int hard_smp_processor_id(void)
+{
+ /* we don't want to mark this access volatile - bad code generation */
+ return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
+}
+
+extern int safe_smp_processor_id(void);
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+extern void prefill_possible_map(void);
+extern unsigned num_processors;
+extern unsigned disabled_cpus;
+
+#endif /* !__ASSEMBLY__ */
+
+#define NO_PROC_ID 0xFF /* No processor magic marker */
+
+
+#ifndef ASSEMBLY
+/*
+ * Some lowlevel functions might want to know about
+ * the real APIC ID <-> CPU # mapping.
+ */
+extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
+extern u8 x86_cpu_to_log_apicid[NR_CPUS];
+extern u8 bios_cpu_apicid[];
+
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+{
+ return cpus_addr(cpumask)[0];
+}
+
+static inline int cpu_present_to_apicid(int mps_cpu)
+{
+ if (mps_cpu < NR_CPUS)
+ return (int)bios_cpu_apicid[mps_cpu];
+ else
+ return BAD_APICID;
+}
+
+#endif /* !ASSEMBLY */
+
+#include <lwk/task.h>
+#define stack_smp_processor_id() \
+({ \
+ struct task_struct *task; \
+ __asm__("andq %%rsp,%0; ":"=r" (task) : "0" (CURRENT_MASK)); \
+ task->arch.cpu; \
+})
+
+#ifndef __ASSEMBLY__
+static __inline int logical_smp_processor_id(void)
+{
+ /* we don't want to mark this access volatile - bad code generation */
+ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+}
+#endif
+
+#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
+
+#endif
+
--- /dev/null
+#ifndef _X86_64_SPINLOCK_H
+#define _X86_64_SPINLOCK_H
+
+#include <arch/atomic.h>
+#include <arch/rwlock.h>
+#include <arch/page.h>
+
+/*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ *
+ * Simple spin lock operations. There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ *
+ * (the type definitions are in arch/spinlock_types.h)
+ */
+
+#define __raw_spin_is_locked(x) \
+ (*(volatile signed int *)(&(x)->slock) <= 0)
+
+#define __raw_spin_lock_string \
+ "\n1:\t" \
+ "lock ; decl %0\n\t" \
+ "js 2f\n" \
+ LOCK_SECTION_START("") \
+ "2:\t" \
+ "rep;nop\n\t" \
+ "cmpl $0,%0\n\t" \
+ "jle 2b\n\t" \
+ "jmp 1b\n" \
+ LOCK_SECTION_END
+
+#define __raw_spin_unlock_string \
+ "movl $1,%0" \
+ :"=m" (lock->slock) : : "memory"
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+ __asm__ __volatile__(
+ __raw_spin_lock_string
+ :"=m" (lock->slock) : : "memory");
+}
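+
+/*
+ * Illustrative sketch (not compiled): C-level equivalent of the locked
+ * "decl"-based sequence above. slock starts at 1 (unlocked); the winner
+ * decrements it to 0, contenders drive it negative and then spin reading it
+ * until it goes positive again before retrying the atomic decrement.
+ * atomic_decrement() is a hypothetical helper that returns the new value.
+ */
+#if 0
+	for (;;) {
+		if (atomic_decrement(&lock->slock) >= 0)
+			break;				/* got the lock */
+		while (lock->slock <= 0)
+			cpu_relax();			/* spin without locked bus traffic */
+	}
+#endif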
+
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+ int oldval;
+
+ __asm__ __volatile__(
+ "xchgl %0,%1"
+ :"=q" (oldval), "=m" (lock->slock)
+ :"0" (0) : "memory");
+
+ return oldval > 0;
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+ __asm__ __volatile__(
+ __raw_spin_unlock_string
+ );
+}
+
+#define __raw_spin_unlock_wait(lock) \
+ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ *
+ * On x86, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ *
+ * The inline assembly is non-obvious. Think about it.
+ *
+ * Changed to use the same technique as rw semaphores. See
+ * semaphore.h for details. -ben
+ *
+ * the helpers are in arch/i386/kernel/semaphore.c
+ */
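+
+/*
+ * Illustrative sketch (not compiled): C-level equivalent of the biased
+ * counter. lock starts at RW_LOCK_BIAS. Each reader subtracts 1 and each
+ * writer subtracts the full bias, so the count only stays non-negative when
+ * no writer holds the lock, and only reaches exactly 0 for one writer with
+ * no readers. atomic_sub_return() is a hypothetical helper returning the new
+ * value; the *_failed helpers are the out-of-line slow paths named above.
+ */
+#if 0
+	/* read_lock: */
+	if (atomic_sub_return(1, &rw->lock) < 0)
+		__read_lock_failed(rw);		/* slow path: undo and wait */
+
+	/* write_lock: */
+	if (atomic_sub_return(RW_LOCK_BIAS, &rw->lock) != 0)
+		__write_lock_failed(rw);	/* slow path: undo and wait */
+#endif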
+
+#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
+#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
+{
+ __build_read_lock(rw, "__read_lock_failed");
+}
+
+static inline void __raw_write_lock(raw_rwlock_t *rw)
+{
+ __build_write_lock(rw, "__write_lock_failed");
+}
+
+static inline int __raw_read_trylock(raw_rwlock_t *lock)
+{
+ atomic_t *count = (atomic_t *)lock;
+ atomic_dec(count);
+ if (atomic_read(count) >= 0)
+ return 1;
+ atomic_inc(count);
+ return 0;
+}
+
+static inline int __raw_write_trylock(raw_rwlock_t *lock)
+{
+ atomic_t *count = (atomic_t *)lock;
+ if (atomic_sub_and_test(RW_LOCK_BIAS, count))
+ return 1;
+ atomic_add(RW_LOCK_BIAS, count);
+ return 0;
+}
+
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+ asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+ asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0"
+ : "=m" (rw->lock) : : "memory");
+}
+
+#endif /* _X86_64_SPINLOCK_H */
--- /dev/null
+#ifndef _X86_64_SPINLOCK_TYPES_H
+#define _X86_64_SPINLOCK_TYPES_H
+
+#ifndef _LWK_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+
+typedef struct {
+ volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+
+#endif
--- /dev/null
+#ifndef _X86_64_STAT_H
+#define _X86_64_STAT_H
+
+#define STAT_HAVE_NSEC 1
+
+struct stat {
+ unsigned long st_dev;
+ unsigned long st_ino;
+ unsigned long st_nlink;
+
+ unsigned int st_mode;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned int __pad0;
+ unsigned long st_rdev;
+ long st_size;
+ long st_blksize;
+ long st_blocks; /* Number 512-byte blocks allocated. */
+
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ long __unused[3];
+};
+
+#endif
--- /dev/null
+#ifndef _X86_64_STRING_H_
+#define _X86_64_STRING_H_
+
+#ifdef __KERNEL__
+
+/* Written 2002 by Andi Kleen */
+
+/* Only used for special circumstances. Stolen from i386/string.h */
+static inline void * __inline_memcpy(void * to, const void * from, size_t n)
+{
+unsigned long d0, d1, d2;
+__asm__ __volatile__(
+ "rep ; movsl\n\t"
+ "testb $2,%b4\n\t"
+ "je 1f\n\t"
+ "movsw\n"
+ "1:\ttestb $1,%b4\n\t"
+ "je 2f\n\t"
+ "movsb\n"
+ "2:"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
+ : "memory");
+return (to);
+}
+
+/* Even with __builtin_ the compiler may decide to use the out of line
+ function. */
+
+#define __HAVE_ARCH_MEMCPY 1
+extern void *__memcpy(void *to, const void *from, size_t len);
+#define memcpy(dst,src,len) \
+ ({ size_t __len = (len); \
+ void *__ret; \
+ if (__builtin_constant_p(len) && __len >= 64) \
+ __ret = __memcpy((dst),(src),__len); \
+ else \
+ __ret = __builtin_memcpy((dst),(src),__len); \
+ __ret; })
+
+
+#define __HAVE_ARCH_MEMSET
+void *memset(void *s, int c, size_t n);
+
+#define __HAVE_ARCH_MEMMOVE
+void * memmove(void * dest,const void *src,size_t count);
+
+int memcmp(const void * cs,const void * ct,size_t count);
+size_t strlen(const char * s);
+char *strcpy(char * dest,const char *src);
+char *strcat(char * dest, const char * src);
+int strcmp(const char * cs,const char * ct);
+
+#endif /* __KERNEL__ */
+
+#endif
--- /dev/null
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <lwk/kernel.h>
+#include <arch/segment.h>
+
+#ifdef __KERNEL__
+
+#define __STR(x) #x
+#define STR(x) __STR(x)
+
+#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
+#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
+
+/* frame pointer must be last for get_wchan */
+#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\n\t"
+
+#define __EXTRA_CLOBBER \
+ ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
+
+#define switch_to(prev,next,last) \
+ asm volatile(SAVE_CONTEXT \
+ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
+ "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
+ "call __switch_to\n\t" \
+ ".globl thread_return\n" \
+ "thread_return:\n\t" \
+ "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
+ "movq %P[thread_info](%%rsi),%%r8\n\t" \
+ "lock ; btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
+ "movq %%rax,%%rdi\n\t" \
+ "jc ret_from_fork\n\t" \
+ RESTORE_CONTEXT \
+ : "=a" (last) \
+ : [next] "S" (next), [prev] "D" (prev), \
+ [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
+ [ti_flags] "i" (offsetof(struct thread_info, flags)),\
+ [tif_fork] "i" (TIF_FORK), \
+ [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
+ [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
+ : "memory", "cc" __EXTRA_CLOBBER)
+
+extern void load_gs_index(unsigned);
+
+/*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong..
+ */
+#define loadsegment(seg,value) \
+ asm volatile("\n" \
+ "1:\t" \
+ "movl %k0,%%" #seg "\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3:\t" \
+ "movl %1,%%" #seg "\n\t" \
+ "jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 8\n\t" \
+ ".quad 1b,3b\n" \
+ ".previous" \
+ : :"r" (value), "r" (0))
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+#define clts() __asm__ __volatile__ ("clts")
+
+static inline unsigned long read_cr0(void)
+{
+ unsigned long cr0;
+ asm volatile("movq %%cr0,%0" : "=r" (cr0));
+ return cr0;
+}
+
+static inline void write_cr0(unsigned long val)
+{
+ asm volatile("movq %0,%%cr0" :: "r" (val));
+}
+
+static inline unsigned long read_cr3(void)
+{
+ unsigned long cr3;
+ asm("movq %%cr3,%0" : "=r" (cr3));
+ return cr3;
+}
+
+static inline unsigned long read_cr4(void)
+{
+ unsigned long cr4;
+ asm("movq %%cr4,%0" : "=r" (cr4));
+ return cr4;
+}
+
+static inline void write_cr4(unsigned long val)
+{
+ asm volatile("movq %0,%%cr4" :: "r" (val));
+}
+
+#define stts() write_cr0(8 | read_cr0())
+
+#define wbinvd() \
+ __asm__ __volatile__ ("wbinvd": : :"memory");
+
+static inline unsigned long read_eflags(void)
+{
+ unsigned long eflags;
+
+ __asm__ __volatile__(
+ "# __raw_save_flags\n\t"
+ "pushf ; pop %0"
+ : "=g" (eflags)
+ : /* no input */
+ : "memory"
+ );
+
+ return eflags;
+}
+
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+static inline void sched_cacheflush(void)
+{
+ wbinvd();
+}
+
+#endif /* __KERNEL__ */
+
+#define nop() __asm__ __volatile__ ("nop")
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+#define tas(ptr) (xchg((ptr),1))
+
+#define __xg(x) ((volatile long *)(x))
+
+static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
+{
+ *ptr = val;
+}
+
+#define _set_64bit set_64bit
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ * but generally the primitive is invalid, *ptr is output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__ __volatile__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__ __volatile__("xchgl %k0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 8:
+ __asm__ __volatile__("xchgq %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("lock ; cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__("lock ; cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__("lock ; cmpxchgl %k1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 8:
+ __asm__ __volatile__("lock ; cmpxchgq %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
+#define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+					(unsigned long)(n),sizeof(*(ptr))))
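+
+/*
+ * Illustrative sketch (not compiled): the canonical compare-and-swap retry
+ * loop. Read the old value, compute the new one, and only install it if the
+ * location still holds the value that was read; otherwise another CPU won
+ * the race and the loop retries with the fresh value.
+ */
+#if 0
+static void atomic_add_ulong(volatile unsigned long *counter, unsigned long n)
+{
+	unsigned long old, new;
+
+	do {
+		old = *counter;
+		new = old + n;
+	} while (cmpxchg(counter, old, new) != old);
+}
+#endif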
+
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define smp_read_barrier_depends() do {} while(0)
+
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#define mb() asm volatile("mfence":::"memory")
+#define rmb() asm volatile("lfence":::"memory")
+#define wmb() asm volatile("sfence" ::: "memory")
+#define read_barrier_depends() do {} while(0)
+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
+
+/* interrupt control.. */
+#define local_save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
+#define local_irq_restore(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
+
+#define local_irq_disable() __asm__ __volatile__("cli": : :"memory")
+#define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
+
+#define irqs_disabled() \
+({ \
+ unsigned long flags; \
+ local_save_flags(flags); \
+ !(flags & (1<<9)); \
+})
+
+#define irqs_enabled() (!irqs_disabled())
+
+/* For spinlocks etc */
+#define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
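+
+/*
+ * Illustrative sketch (not compiled): the usual pattern for a short critical
+ * section that must not be interrupted. local_irq_save() records the current
+ * RFLAGS and clears IF; local_irq_restore() puts the saved flags back, so the
+ * section is safe whether or not interrupts were already disabled on entry.
+ */
+#if 0
+	unsigned long flags;
+
+	local_irq_save(flags);
+	/* ... touch data shared with an interrupt handler ... */
+	local_irq_restore(flags);
+#endif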
+
+/* used in the idle loop; sti holds off interrupts until after the next instruction, so sti;hlt cannot be interrupted in between */
+#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
+/* used when interrupts are already enabled or to shutdown the processor */
+#define halt() __asm__ __volatile__("hlt": : :"memory")
+
+void cpu_idle_wait(void);
+
+extern unsigned long arch_align_stack(unsigned long sp);
+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+#endif
--- /dev/null
+#ifndef _ARCH_TASK_H
+#define _ARCH_TASK_H
+
+/**
+ * Bits in the arch_task.flags field.
+ */
+#define _TF_NEW_TASK_BIT 0
+#define _TF_USED_FPU_BIT 1
+
+/**
+ * Masks for the bits in the arch_task.flags field.
+ */
+#define TF_NEW_TASK (1 << _TF_NEW_TASK_BIT)
+#define TF_USED_FPU (1 << _TF_USED_FPU_BIT)
+
+#ifndef __ASSEMBLY__
+
+#include <arch/pda.h>
+#include <arch/page_table.h>
+
+struct arch_mm {
+ xpte_t *page_table_root;
+};
+
+/**
+ * Architecture-specific task information.
+ */
+struct arch_task {
+ uint32_t flags; /* arch-dependent task flags */
+ unsigned long addr_limit; /* task's virtual memory space is from [0,addr_limit) */
+ struct thread_struct thread;
+};
+
+#endif
+#endif
--- /dev/null
+#ifndef _ARCH_x86_64_TIME_H
+#define _ARCH_x86_64_TIME_H
+
+#include <arch/tsc.h>
+
+#endif
--- /dev/null
+#ifndef _X86_64_TLBFLUSH_H
+#define _X86_64_TLBFLUSH_H
+
+//#include <linux/mm.h>
+#include <arch/processor.h>
+
+#define __flush_tlb() \
+ do { \
+ unsigned long tmpreg; \
+ \
+ __asm__ __volatile__( \
+ "movq %%cr3, %0; # flush TLB \n" \
+ "movq %0, %%cr3; \n" \
+ : "=r" (tmpreg) \
+ :: "memory"); \
+ } while (0)
+
+/*
+ * Global pages have to be flushed a bit differently. Not a real
+ * performance problem because this does not happen often.
+ */
+#define __flush_tlb_global() \
+ do { \
+ unsigned long tmpreg, cr4, cr4_orig; \
+ \
+ __asm__ __volatile__( \
+ "movq %%cr4, %2; # turn off PGE \n" \
+ "movq %2, %1; \n" \
+ "andq %3, %1; \n" \
+ "movq %1, %%cr4; \n" \
+ "movq %%cr3, %0; # flush TLB \n" \
+ "movq %0, %%cr3; \n" \
+ "movq %2, %%cr4; # turn PGE back on \n" \
+ : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \
+ : "i" (~X86_CR4_PGE) \
+ : "memory"); \
+ } while (0)
+
+extern unsigned long pgkern_mask;
+
+#define __flush_tlb_all() __flush_tlb_global()
+
+#define __flush_tlb_one(addr) \
+ __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+
+#if 0
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb() flushes the current mm struct TLBs
+ * - flush_tlb_all() flushes all processes TLBs
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ *
+ * x86-64 can only flush individual pages or full VMs. For a range flush
+ * we always do the full VM. Might be worth trying if for a small
+ * range a few INVLPGs in a row are a win.
+ */
+
+#ifndef CONFIG_SMP
+
+#define flush_tlb() __flush_tlb()
+#define flush_tlb_all() __flush_tlb_all()
+#define local_flush_tlb() __flush_tlb()
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (mm == current->active_mm)
+ __flush_tlb();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ if (vma->vm_mm == current->active_mm)
+ __flush_tlb_one(addr);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (vma->vm_mm == current->active_mm)
+ __flush_tlb();
+}
+
+#else
+
+#include <asm/smp.h>
+
+#define local_flush_tlb() \
+ __flush_tlb()
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+
+#define flush_tlb() flush_tlb_current_task()
+
+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
+{
+ flush_tlb_mm(vma->vm_mm);
+}
+
+#define TLBSTATE_OK 1
+#define TLBSTATE_LAZY 2
+
+/* Roughly an IPI every 20MB with 4k pages for freeing page table
+ ranges. Cost is about 42k of memory for each CPU. */
+#define ARCH_FREE_PTE_NR 5350
+
+#endif
+
+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ /* x86_64 does not keep any page table caches in a software TLB.
+ The CPUs do in their hardware TLBs, but they are handled
+ by the normal TLB flushing algorithms. */
+}
+
+#endif
+
+#endif /* _X86_64_TLBFLUSH_H */
--- /dev/null
+/*
+ * linux/include/asm-i386/tsc.h
+ *
+ * i386 TSC related functions
+ */
+#ifndef _ARCH_x86_64_TSC_H
+#define _ARCH_x86_64_TSC_H
+
+#include <lwk/types.h>
+#include <arch/processor.h>
+
+typedef uint64_t cycles_t;
+
+/**
+ * Returns the current value of the CPU's cycle counter.
+ *
+ * NOTE: This is not serializing. It doesn't necessarily wait for previous
+ * instructions to complete before reading the cycle counter. Also,
+ * subsequent instructions could potentially begin execution before
+ * the cycle counter is read.
+ */
+static __always_inline cycles_t
+get_cycles(void)
+{
+ cycles_t ret = 0;
+ rdtscll(ret);
+ return ret;
+}
+
+/**
+ * This is a synchronizing version of get_cycles(). It ensures that all
+ * previous instructions have completed before reading the cycle counter.
+ */
+static __always_inline cycles_t
+get_cycles_sync(void)
+{
+ sync_core();
+ return get_cycles();
+}
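+
+/*
+ * Illustrative sketch (not compiled): timing a short code region with the
+ * cycle counter. The synchronizing variant is used on entry so earlier
+ * instructions cannot drift into the measured window; the result is in CPU
+ * cycles, not wall-clock time. do_measured_work() is hypothetical.
+ */
+#if 0
+	cycles_t start, end;
+
+	start = get_cycles_sync();
+	do_measured_work();
+	end = get_cycles_sync();
+	printk("work took %llu cycles\n", (unsigned long long)(end - start));
+#endif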
+
+#endif
--- /dev/null
+#ifndef _X86_64_TYPES_H
+#define _X86_64_TYPES_H
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned short umode_t;
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+
+#define BITS_PER_LONG 64
+
+#ifndef __ASSEMBLY__
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+typedef signed long long s64;
+typedef unsigned long long u64;
+
+typedef u64 dma64_addr_t;
+typedef u64 dma_addr_t;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif
--- /dev/null
+#ifndef _X86_64_UACCESS_H
+#define _X86_64_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <lwk/compiler.h>
+#include <lwk/errno.h>
+#include <lwk/prefetch.h>
+#include <lwk/task.h>
+#include <arch/page.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed; with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define KERNEL_DS 0xFFFFFFFFFFFFFFFFUL
+#define USER_DS PAGE_OFFSET
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current->arch.addr_limit)
+#define set_fs(x) (current->arch.addr_limit = (x))
+
+#define segment_eq(a,b) ((a).seg == (b).seg)
+
+#define __addr_ok(addr) (!((unsigned long)(addr) & (current->arch.addr_limit)))
+
+/*
+ * Uhhuh, this needs 65-bit arithmetic. We have a carry..
+ */
+#define __range_not_ok(addr,size) ({ \
+ unsigned long flag,sum; \
+ __chk_user_ptr(addr); \
+ asm("# range_ok\n\r" \
+ "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \
+ :"=&r" (flag), "=r" (sum) \
+ :"1" (addr),"g" ((long)(size)),"g" (current->arch.addr_limit)); \
+ flag; })
+
+#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ */
+
+#define __get_user_x(size,ret,x,ptr) \
+ asm volatile("call __get_user_" #size \
+ :"=a" (ret),"=d" (x) \
+ :"c" (ptr) \
+ :"r8")
+
+/* Careful: we have to cast the result to the type of the pointer for sign reasons */
+#define get_user(x,ptr) \
+({ unsigned long __val_gu; \
+ int __ret_gu; \
+ __chk_user_ptr(ptr); \
+ switch(sizeof (*(ptr))) { \
+ case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
+ case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \
+ case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \
+ case 8: __get_user_x(8,__ret_gu,__val_gu,ptr); break; \
+ default: __get_user_bad(); break; \
+ } \
+ (x) = (typeof(*(ptr)))__val_gu; \
+ __ret_gu; \
+})
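+
+/*
+ * Illustrative sketch (not compiled): typical use in a system call handler.
+ * get_user()/put_user() each return 0 on success or -EFAULT if the user
+ * pointer is bad, so the handler simply propagates the error. sys_read_flag()
+ * and its flag update are hypothetical.
+ */
+#if 0
+static long sys_read_flag(int __user *uptr)
+{
+	int value;
+
+	if (get_user(value, uptr))
+		return -EFAULT;
+	value |= 0x1;				/* hypothetical flag update */
+	if (put_user(value, uptr))
+		return -EFAULT;
+	return 0;
+}
+#endif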
+
+extern void __put_user_1(void);
+extern void __put_user_2(void);
+extern void __put_user_4(void);
+extern void __put_user_8(void);
+extern void __put_user_bad(void);
+
+#define __put_user_x(size,ret,x,ptr) \
+ asm volatile("call __put_user_" #size \
+ :"=a" (ret) \
+ :"c" (ptr),"d" (x) \
+ :"r8")
+
+#define put_user(x,ptr) \
+ __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+
+#define __get_user(x,ptr) \
+ __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+#define __put_user(x,ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+
+#define __get_user_unaligned __get_user
+#define __put_user_unaligned __put_user
+
+#define __put_user_nocheck(x,ptr,size) \
+({ \
+ int __pu_err; \
+ __put_user_size((x),(ptr),(size),__pu_err); \
+ __pu_err; \
+})
+
+
+#define __put_user_check(x,ptr,size) \
+({ \
+ int __pu_err; \
+ typeof(*(ptr)) __user *__pu_addr = (ptr); \
+ switch (size) { \
+ case 1: __put_user_x(1,__pu_err,x,__pu_addr); break; \
+ case 2: __put_user_x(2,__pu_err,x,__pu_addr); break; \
+ case 4: __put_user_x(4,__pu_err,x,__pu_addr); break; \
+ case 8: __put_user_x(8,__pu_err,x,__pu_addr); break; \
+ default: __put_user_bad(); \
+ } \
+ __pu_err; \
+})
+
+#define __put_user_size(x,ptr,size,retval) \
+do { \
+ retval = 0; \
+ __chk_user_ptr(ptr); \
+ switch (size) { \
+ case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\
+ case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\
+ case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\
+ case 8: __put_user_asm(x,ptr,retval,"q","","Zr",-EFAULT); break;\
+ default: __put_user_bad(); \
+ } \
+} while (0)
+
+/* FIXME: this hack is definitely wrong -AK */
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct __user *)(x))
+
+/*
+ * Tell gcc we read from memory instead of writing: this is because
+ * we do not write to any memory gcc knows about, so there are no
+ * aliasing issues.
+ */
+#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \
+ asm volatile( \
+ "1: mov"itype" %"rtype"1,%2\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 8\n" \
+ " .quad 1b,3b\n" \
+ ".previous" \
+ : "=r"(err) \
+ : ltype (x), "m"(__m(addr)), "i"(errno), "0"(err))
+
+
+#define __get_user_nocheck(x,ptr,size) \
+({ \
+ int __gu_err; \
+ unsigned long __gu_val; \
+ __get_user_size(__gu_val,(ptr),(size),__gu_err); \
+ (x) = (typeof(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+extern int __get_user_1(void);
+extern int __get_user_2(void);
+extern int __get_user_4(void);
+extern int __get_user_8(void);
+extern int __get_user_bad(void);
+
+#define __get_user_size(x,ptr,size,retval) \
+do { \
+ retval = 0; \
+ __chk_user_ptr(ptr); \
+ switch (size) { \
+ case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\
+ case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\
+ case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\
+ case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\
+ default: (x) = __get_user_bad(); \
+ } \
+} while (0)
+
+#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \
+ asm volatile( \
+ "1: mov"itype" %2,%"rtype"1\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+ " xor"itype" %"rtype"1,%"rtype"1\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 8\n" \
+ " .quad 1b,3b\n" \
+ ".previous" \
+ : "=r"(err), ltype (x) \
+ : "m"(__m(addr)), "i"(errno), "0"(err))
+
+/*
+ * Copy To/From Userspace
+ */
+
+/* Handles exceptions in both to and from, but doesn't do access_ok */
+__must_check unsigned long
+copy_user_generic(void *to, const void *from, unsigned len);
+
+__must_check unsigned long
+copy_to_user(void __user *to, const void *from, unsigned len);
+__must_check unsigned long
+copy_from_user(void *to, const void __user *from, unsigned len);
+__must_check unsigned long
+copy_in_user(void __user *to, const void __user *from, unsigned len);
+
+static __always_inline __must_check
+int __copy_from_user(void *dst, const void __user *src, unsigned size)
+{
+ int ret = 0;
+ if (!__builtin_constant_p(size))
+ return copy_user_generic(dst,(__force void *)src,size);
+ switch (size) {
+ case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1);
+ return ret;
+ case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2);
+ return ret;
+ case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4);
+ return ret;
+ case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8);
+ return ret;
+ case 10:
+ __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
+ if (unlikely(ret)) return ret;
+ __get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2);
+ return ret;
+ case 16:
+ __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
+ if (unlikely(ret)) return ret;
+ __get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8);
+ return ret;
+ default:
+ return copy_user_generic(dst,(__force void *)src,size);
+ }
+}
+
+static __always_inline __must_check
+int __copy_to_user(void __user *dst, const void *src, unsigned size)
+{
+ int ret = 0;
+ if (!__builtin_constant_p(size))
+ return copy_user_generic((__force void *)dst,src,size);
+ switch (size) {
+ case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1);
+ return ret;
+ case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2);
+ return ret;
+ case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4);
+ return ret;
+ case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8);
+ return ret;
+ case 10:
+ __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10);
+ if (unlikely(ret)) return ret;
+ asm("":::"memory");
+ __put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2);
+ return ret;
+ case 16:
+ __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16);
+ if (unlikely(ret)) return ret;
+ asm("":::"memory");
+ __put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8);
+ return ret;
+ default:
+ return copy_user_generic((__force void *)dst,src,size);
+ }
+}
+
+static __always_inline __must_check
+int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+{
+ int ret = 0;
+ if (!__builtin_constant_p(size))
+ return copy_user_generic((__force void *)dst,(__force void *)src,size);
+ switch (size) {
+ case 1: {
+ u8 tmp;
+ __get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1);
+ if (likely(!ret))
+ __put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1);
+ return ret;
+ }
+ case 2: {
+ u16 tmp;
+ __get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2);
+ if (likely(!ret))
+ __put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2);
+ return ret;
+ }
+
+ case 4: {
+ u32 tmp;
+ __get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4);
+ if (likely(!ret))
+ __put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4);
+ return ret;
+ }
+ case 8: {
+ u64 tmp;
+ __get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8);
+ if (likely(!ret))
+ __put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8);
+ return ret;
+ }
+ default:
+ return copy_user_generic((__force void *)dst,(__force void *)src,size);
+ }
+}
+
+__must_check long
+strncpy_from_user(char *dst, const char __user *src, long count);
+__must_check long
+__strncpy_from_user(char *dst, const char __user *src, long count);
+__must_check long strnlen_user(const char __user *str, long n);
+__must_check long __strnlen_user(const char __user *str, long n);
+__must_check long strlen_user(const char __user *str);
+__must_check unsigned long clear_user(void __user *mem, unsigned long len);
+__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
+
+__must_check long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size);
+
+static __must_check __always_inline int
+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
+{
+ return copy_user_generic((__force void *)dst, src, size);
+}
+
+#endif /* _X86_64_UACCESS_H */
--- /dev/null
+#ifndef _ARCH_X86_64_UNISTD_H
+#define _ARCH_X86_64_UNISTD_H
+
+/**
+ * This file contains the system call numbers.
+ *
+ * NOTE: holes are not allowed.
+ */
+
+#ifndef __SYSCALL
+#define __SYSCALL(a,b)
+#endif
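+
+/*
+ * Illustrative sketch (not compiled): this header is an "X macro" table. By
+ * default __SYSCALL expands to nothing, so including the file only defines
+ * the __NR_* numbers. A consumer that wants the handler table instead defines
+ * __SYSCALL before including it, e.g. to build an array indexed by syscall
+ * number. The <arch/unistd.h> include path and the syscall_ptr_t
+ * function-pointer type are assumptions.
+ */
+#if 0
+#define __SYSCALL(nr, handler)	[nr] = (syscall_ptr_t) handler,
+syscall_ptr_t syscall_table[] = {
+#include <arch/unistd.h>
+};
+#undef __SYSCALL
+#endif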
+
+#define __NR_read 0
+__SYSCALL(__NR_read, syscall_not_implemented)
+#define __NR_write 1
+__SYSCALL(__NR_write, sys_write)
+#define __NR_open 2
+__SYSCALL(__NR_open, syscall_not_implemented)
+#define __NR_close 3
+__SYSCALL(__NR_close, syscall_not_implemented)
+#define __NR_stat 4
+__SYSCALL(__NR_stat, syscall_not_implemented)
+#define __NR_fstat 5
+__SYSCALL(__NR_fstat, sys_fstat)
+#define __NR_lstat 6
+__SYSCALL(__NR_lstat, syscall_not_implemented)
+#define __NR_poll 7
+__SYSCALL(__NR_poll, syscall_not_implemented)
+
+#define __NR_lseek 8
+__SYSCALL(__NR_lseek, syscall_not_implemented)
+#define __NR_mmap 9
+__SYSCALL(__NR_mmap, sys_mmap)
+#define __NR_mprotect 10
+__SYSCALL(__NR_mprotect, syscall_not_implemented)
+#define __NR_munmap 11
+__SYSCALL(__NR_munmap, syscall_not_implemented)
+#define __NR_brk 12
+__SYSCALL(__NR_brk, sys_brk)
+#define __NR_rt_sigaction 13
+__SYSCALL(__NR_rt_sigaction, syscall_not_implemented)
+#define __NR_rt_sigprocmask 14
+__SYSCALL(__NR_rt_sigprocmask, syscall_not_implemented)
+#define __NR_rt_sigreturn 15
+__SYSCALL(__NR_rt_sigreturn, syscall_not_implemented)
+
+#define __NR_ioctl 16
+__SYSCALL(__NR_ioctl, syscall_not_implemented)
+#define __NR_pread64 17
+__SYSCALL(__NR_pread64, syscall_not_implemented)
+#define __NR_pwrite64 18
+__SYSCALL(__NR_pwrite64, syscall_not_implemented)
+#define __NR_readv 19
+__SYSCALL(__NR_readv, syscall_not_implemented)
+#define __NR_writev 20
+__SYSCALL(__NR_writev, syscall_not_implemented)
+#define __NR_access 21
+__SYSCALL(__NR_access, syscall_not_implemented)
+#define __NR_pipe 22
+__SYSCALL(__NR_pipe, syscall_not_implemented)
+#define __NR_select 23
+__SYSCALL(__NR_select, syscall_not_implemented)
+
+#define __NR_sched_yield 24
+__SYSCALL(__NR_sched_yield, sys_sched_yield)
+#define __NR_mremap 25
+__SYSCALL(__NR_mremap, syscall_not_implemented)
+#define __NR_msync 26
+__SYSCALL(__NR_msync, syscall_not_implemented)
+#define __NR_mincore 27
+__SYSCALL(__NR_mincore, syscall_not_implemented)
+#define __NR_madvise 28
+__SYSCALL(__NR_madvise, syscall_not_implemented)
+#define __NR_shmget 29
+__SYSCALL(__NR_shmget, syscall_not_implemented)
+#define __NR_shmat 30
+__SYSCALL(__NR_shmat, syscall_not_implemented)
+#define __NR_shmctl 31
+__SYSCALL(__NR_shmctl, syscall_not_implemented)
+
+#define __NR_dup 32
+__SYSCALL(__NR_dup, syscall_not_implemented)
+#define __NR_dup2 33
+__SYSCALL(__NR_dup2, syscall_not_implemented)
+#define __NR_pause 34
+__SYSCALL(__NR_pause, syscall_not_implemented)
+#define __NR_nanosleep 35
+__SYSCALL(__NR_nanosleep, sys_nanosleep)
+#define __NR_getitimer 36
+__SYSCALL(__NR_getitimer, syscall_not_implemented)
+#define __NR_alarm 37
+__SYSCALL(__NR_alarm, syscall_not_implemented)
+#define __NR_setitimer 38
+__SYSCALL(__NR_setitimer, syscall_not_implemented)
+#define __NR_getpid 39
+__SYSCALL(__NR_getpid, syscall_not_implemented)
+
+#define __NR_sendfile 40
+__SYSCALL(__NR_sendfile, syscall_not_implemented)
+#define __NR_socket 41
+__SYSCALL(__NR_socket, syscall_not_implemented)
+#define __NR_connect 42
+__SYSCALL(__NR_connect, syscall_not_implemented)
+#define __NR_accept 43
+__SYSCALL(__NR_accept, syscall_not_implemented)
+#define __NR_sendto 44
+__SYSCALL(__NR_sendto, syscall_not_implemented)
+#define __NR_recvfrom 45
+__SYSCALL(__NR_recvfrom, syscall_not_implemented)
+#define __NR_sendmsg 46
+__SYSCALL(__NR_sendmsg, syscall_not_implemented)
+#define __NR_recvmsg 47
+__SYSCALL(__NR_recvmsg, syscall_not_implemented)
+
+#define __NR_shutdown 48
+__SYSCALL(__NR_shutdown, syscall_not_implemented)
+#define __NR_bind 49
+__SYSCALL(__NR_bind, syscall_not_implemented)
+#define __NR_listen 50
+__SYSCALL(__NR_listen, syscall_not_implemented)
+#define __NR_getsockname 51
+__SYSCALL(__NR_getsockname, syscall_not_implemented)
+#define __NR_getpeername 52
+__SYSCALL(__NR_getpeername, syscall_not_implemented)
+#define __NR_socketpair 53
+__SYSCALL(__NR_socketpair, syscall_not_implemented)
+#define __NR_setsockopt 54
+__SYSCALL(__NR_setsockopt, syscall_not_implemented)
+#define __NR_getsockopt 55
+__SYSCALL(__NR_getsockopt, syscall_not_implemented)
+
+#define __NR_clone 56
+__SYSCALL(__NR_clone, syscall_not_implemented)
+#define __NR_fork 57
+__SYSCALL(__NR_fork, syscall_not_implemented)
+#define __NR_vfork 58
+__SYSCALL(__NR_vfork, syscall_not_implemented)
+#define __NR_execve 59
+__SYSCALL(__NR_execve, syscall_not_implemented)
+#define __NR_exit 60
+__SYSCALL(__NR_exit, sys_task_exit)
+#define __NR_wait4 61
+__SYSCALL(__NR_wait4, syscall_not_implemented)
+#define __NR_kill 62
+__SYSCALL(__NR_kill, syscall_not_implemented)
+#define __NR_uname 63
+__SYSCALL(__NR_uname, sys_uname)
+
+#define __NR_semget 64
+__SYSCALL(__NR_semget, syscall_not_implemented)
+#define __NR_semop 65
+__SYSCALL(__NR_semop, syscall_not_implemented)
+#define __NR_semctl 66
+__SYSCALL(__NR_semctl, syscall_not_implemented)
+#define __NR_shmdt 67
+__SYSCALL(__NR_shmdt, syscall_not_implemented)
+#define __NR_msgget 68
+__SYSCALL(__NR_msgget, syscall_not_implemented)
+#define __NR_msgsnd 69
+__SYSCALL(__NR_msgsnd, syscall_not_implemented)
+#define __NR_msgrcv 70
+__SYSCALL(__NR_msgrcv, syscall_not_implemented)
+#define __NR_msgctl 71
+__SYSCALL(__NR_msgctl, syscall_not_implemented)
+
+#define __NR_fcntl 72
+__SYSCALL(__NR_fcntl, syscall_not_implemented)
+#define __NR_flock 73
+__SYSCALL(__NR_flock, syscall_not_implemented)
+#define __NR_fsync 74
+__SYSCALL(__NR_fsync, syscall_not_implemented)
+#define __NR_fdatasync 75
+__SYSCALL(__NR_fdatasync, syscall_not_implemented)
+#define __NR_truncate 76
+__SYSCALL(__NR_truncate, syscall_not_implemented)
+#define __NR_ftruncate 77
+__SYSCALL(__NR_ftruncate, syscall_not_implemented)
+#define __NR_getdents 78
+__SYSCALL(__NR_getdents, syscall_not_implemented)
+#define __NR_getcwd 79
+__SYSCALL(__NR_getcwd, syscall_not_implemented)
+
+#define __NR_chdir 80
+__SYSCALL(__NR_chdir, syscall_not_implemented)
+#define __NR_fchdir 81
+__SYSCALL(__NR_fchdir, syscall_not_implemented)
+#define __NR_rename 82
+__SYSCALL(__NR_rename, syscall_not_implemented)
+#define __NR_mkdir 83
+__SYSCALL(__NR_mkdir, syscall_not_implemented)
+#define __NR_rmdir 84
+__SYSCALL(__NR_rmdir, syscall_not_implemented)
+#define __NR_creat 85
+__SYSCALL(__NR_creat, syscall_not_implemented)
+#define __NR_link 86
+__SYSCALL(__NR_link, syscall_not_implemented)
+#define __NR_unlink 87
+__SYSCALL(__NR_unlink, syscall_not_implemented)
+
+#define __NR_symlink 88
+__SYSCALL(__NR_symlink, syscall_not_implemented)
+#define __NR_readlink 89
+__SYSCALL(__NR_readlink, syscall_not_implemented)
+#define __NR_chmod 90
+__SYSCALL(__NR_chmod, syscall_not_implemented)
+#define __NR_fchmod 91
+__SYSCALL(__NR_fchmod, syscall_not_implemented)
+#define __NR_chown 92
+__SYSCALL(__NR_chown, syscall_not_implemented)
+#define __NR_fchown 93
+__SYSCALL(__NR_fchown, syscall_not_implemented)
+#define __NR_lchown 94
+__SYSCALL(__NR_lchown, syscall_not_implemented)
+#define __NR_umask 95
+__SYSCALL(__NR_umask, syscall_not_implemented)
+
+#define __NR_gettimeofday 96
+__SYSCALL(__NR_gettimeofday, sys_gettimeofday)
+#define __NR_getrlimit 97
+__SYSCALL(__NR_getrlimit, syscall_not_implemented)
+#define __NR_getrusage 98
+__SYSCALL(__NR_getrusage, syscall_not_implemented)
+#define __NR_sysinfo 99
+__SYSCALL(__NR_sysinfo, syscall_not_implemented)
+#define __NR_times 100
+__SYSCALL(__NR_times, syscall_not_implemented)
+#define __NR_ptrace 101
+__SYSCALL(__NR_ptrace, syscall_not_implemented)
+#define __NR_getuid 102
+__SYSCALL(__NR_getuid, sys_getuid)
+#define __NR_syslog 103
+__SYSCALL(__NR_syslog, syscall_not_implemented)
+
+/* at the very end the stuff that never runs during the benchmarks */
+#define __NR_getgid 104
+__SYSCALL(__NR_getgid, sys_getgid)
+#define __NR_setuid 105
+__SYSCALL(__NR_setuid, syscall_not_implemented)
+#define __NR_setgid 106
+__SYSCALL(__NR_setgid, syscall_not_implemented)
+#define __NR_geteuid 107
+__SYSCALL(__NR_geteuid, sys_getuid)
+#define __NR_getegid 108
+__SYSCALL(__NR_getegid, sys_getgid)
+#define __NR_setpgid 109
+__SYSCALL(__NR_setpgid, syscall_not_implemented)
+#define __NR_getppid 110
+__SYSCALL(__NR_getppid, syscall_not_implemented)
+#define __NR_getpgrp 111
+__SYSCALL(__NR_getpgrp, syscall_not_implemented)
+
+#define __NR_setsid 112
+__SYSCALL(__NR_setsid, syscall_not_implemented)
+#define __NR_setreuid 113
+__SYSCALL(__NR_setreuid, syscall_not_implemented)
+#define __NR_setregid 114
+__SYSCALL(__NR_setregid, syscall_not_implemented)
+#define __NR_getgroups 115
+__SYSCALL(__NR_getgroups, syscall_not_implemented)
+#define __NR_setgroups 116
+__SYSCALL(__NR_setgroups, syscall_not_implemented)
+#define __NR_setresuid 117
+__SYSCALL(__NR_setresuid, syscall_not_implemented)
+#define __NR_getresuid 118
+__SYSCALL(__NR_getresuid, syscall_not_implemented)
+#define __NR_setresgid 119
+__SYSCALL(__NR_setresgid, syscall_not_implemented)
+
+#define __NR_getresgid 120
+__SYSCALL(__NR_getresgid, syscall_not_implemented)
+#define __NR_getpgid 121
+__SYSCALL(__NR_getpgid, syscall_not_implemented)
+#define __NR_setfsuid 122
+__SYSCALL(__NR_setfsuid, syscall_not_implemented)
+#define __NR_setfsgid 123
+__SYSCALL(__NR_setfsgid, syscall_not_implemented)
+#define __NR_getsid 124
+__SYSCALL(__NR_getsid, syscall_not_implemented)
+#define __NR_capget 125
+__SYSCALL(__NR_capget, syscall_not_implemented)
+#define __NR_capset 126
+__SYSCALL(__NR_capset, syscall_not_implemented)
+
+#define __NR_rt_sigpending 127
+__SYSCALL(__NR_rt_sigpending, syscall_not_implemented)
+#define __NR_rt_sigtimedwait 128
+__SYSCALL(__NR_rt_sigtimedwait, syscall_not_implemented)
+#define __NR_rt_sigqueueinfo 129
+__SYSCALL(__NR_rt_sigqueueinfo, syscall_not_implemented)
+#define __NR_rt_sigsuspend 130
+__SYSCALL(__NR_rt_sigsuspend, syscall_not_implemented)
+#define __NR_sigaltstack 131
+__SYSCALL(__NR_sigaltstack, syscall_not_implemented)
+#define __NR_utime 132
+__SYSCALL(__NR_utime, syscall_not_implemented)
+#define __NR_mknod 133
+__SYSCALL(__NR_mknod, syscall_not_implemented)
+
+/* Only needed for a.out */
+#define __NR_uselib 134
+__SYSCALL(__NR_uselib, syscall_not_implemented)
+#define __NR_personality 135
+__SYSCALL(__NR_personality, syscall_not_implemented)
+
+#define __NR_ustat 136
+__SYSCALL(__NR_ustat, syscall_not_implemented)
+#define __NR_statfs 137
+__SYSCALL(__NR_statfs, syscall_not_implemented)
+#define __NR_fstatfs 138
+__SYSCALL(__NR_fstatfs, syscall_not_implemented)
+#define __NR_sysfs 139
+__SYSCALL(__NR_sysfs, syscall_not_implemented)
+
+#define __NR_getpriority 140
+__SYSCALL(__NR_getpriority, syscall_not_implemented)
+#define __NR_setpriority 141
+__SYSCALL(__NR_setpriority, syscall_not_implemented)
+#define __NR_sched_setparam 142
+__SYSCALL(__NR_sched_setparam, syscall_not_implemented)
+#define __NR_sched_getparam 143
+__SYSCALL(__NR_sched_getparam, syscall_not_implemented)
+#define __NR_sched_setscheduler 144
+__SYSCALL(__NR_sched_setscheduler, syscall_not_implemented)
+#define __NR_sched_getscheduler 145
+__SYSCALL(__NR_sched_getscheduler, syscall_not_implemented)
+#define __NR_sched_get_priority_max 146
+__SYSCALL(__NR_sched_get_priority_max, syscall_not_implemented)
+#define __NR_sched_get_priority_min 147
+__SYSCALL(__NR_sched_get_priority_min, syscall_not_implemented)
+#define __NR_sched_rr_get_interval 148
+__SYSCALL(__NR_sched_rr_get_interval, syscall_not_implemented)
+
+#define __NR_mlock 149
+__SYSCALL(__NR_mlock, syscall_not_implemented)
+#define __NR_munlock 150
+__SYSCALL(__NR_munlock, syscall_not_implemented)
+#define __NR_mlockall 151
+__SYSCALL(__NR_mlockall, syscall_not_implemented)
+#define __NR_munlockall 152
+__SYSCALL(__NR_munlockall, syscall_not_implemented)
+
+#define __NR_vhangup 153
+__SYSCALL(__NR_vhangup, syscall_not_implemented)
+
+#define __NR_modify_ldt 154
+__SYSCALL(__NR_modify_ldt, syscall_not_implemented)
+
+#define __NR_pivot_root 155
+__SYSCALL(__NR_pivot_root, syscall_not_implemented)
+
+#define __NR__sysctl 156
+__SYSCALL(__NR__sysctl, syscall_not_implemented)
+
+#define __NR_prctl 157
+__SYSCALL(__NR_prctl, syscall_not_implemented)
+#define __NR_arch_prctl 158
+__SYSCALL(__NR_arch_prctl, sys_arch_prctl)
+
+#define __NR_adjtimex 159
+__SYSCALL(__NR_adjtimex, syscall_not_implemented)
+
+#define __NR_setrlimit 160
+__SYSCALL(__NR_setrlimit, syscall_not_implemented)
+
+#define __NR_chroot 161
+__SYSCALL(__NR_chroot, syscall_not_implemented)
+
+#define __NR_sync 162
+__SYSCALL(__NR_sync, syscall_not_implemented)
+
+#define __NR_acct 163
+__SYSCALL(__NR_acct, syscall_not_implemented)
+
+#define __NR_settimeofday 164
+__SYSCALL(__NR_settimeofday, sys_settimeofday)
+
+#define __NR_mount 165
+__SYSCALL(__NR_mount, syscall_not_implemented)
+#define __NR_umount2 166
+__SYSCALL(__NR_umount2, syscall_not_implemented)
+
+#define __NR_swapon 167
+__SYSCALL(__NR_swapon, syscall_not_implemented)
+#define __NR_swapoff 168
+__SYSCALL(__NR_swapoff, syscall_not_implemented)
+
+#define __NR_reboot 169
+__SYSCALL(__NR_reboot, syscall_not_implemented)
+
+#define __NR_sethostname 170
+__SYSCALL(__NR_sethostname, syscall_not_implemented)
+#define __NR_setdomainname 171
+__SYSCALL(__NR_setdomainname, syscall_not_implemented)
+
+#define __NR_iopl 172
+__SYSCALL(__NR_iopl, syscall_not_implemented)
+#define __NR_ioperm 173
+__SYSCALL(__NR_ioperm, syscall_not_implemented)
+
+#define __NR_create_module 174
+__SYSCALL(__NR_create_module, syscall_not_implemented)
+#define __NR_init_module 175
+__SYSCALL(__NR_init_module, syscall_not_implemented)
+#define __NR_delete_module 176
+__SYSCALL(__NR_delete_module, syscall_not_implemented)
+#define __NR_get_kernel_syms 177
+__SYSCALL(__NR_get_kernel_syms, syscall_not_implemented)
+#define __NR_query_module 178
+__SYSCALL(__NR_query_module, syscall_not_implemented)
+
+#define __NR_quotactl 179
+__SYSCALL(__NR_quotactl, syscall_not_implemented)
+
+#define __NR_nfsservctl 180
+__SYSCALL(__NR_nfsservctl, syscall_not_implemented)
+
+#define __NR_getpmsg 181 /* reserved for LiS/STREAMS */
+__SYSCALL(__NR_getpmsg, syscall_not_implemented)
+#define __NR_putpmsg 182 /* reserved for LiS/STREAMS */
+__SYSCALL(__NR_putpmsg, syscall_not_implemented)
+
+#define __NR_afs_syscall 183 /* reserved for AFS */
+__SYSCALL(__NR_afs_syscall, syscall_not_implemented)
+
+#define __NR_tuxcall 184 /* reserved for tux */
+__SYSCALL(__NR_tuxcall, syscall_not_implemented)
+
+#define __NR_security 185
+__SYSCALL(__NR_security, syscall_not_implemented)
+
+#define __NR_gettid 186
+__SYSCALL(__NR_gettid, syscall_not_implemented)
+
+#define __NR_readahead 187
+__SYSCALL(__NR_readahead, syscall_not_implemented)
+#define __NR_setxattr 188
+__SYSCALL(__NR_setxattr, syscall_not_implemented)
+#define __NR_lsetxattr 189
+__SYSCALL(__NR_lsetxattr, syscall_not_implemented)
+#define __NR_fsetxattr 190
+__SYSCALL(__NR_fsetxattr, syscall_not_implemented)
+#define __NR_getxattr 191
+__SYSCALL(__NR_getxattr, syscall_not_implemented)
+#define __NR_lgetxattr 192
+__SYSCALL(__NR_lgetxattr, syscall_not_implemented)
+#define __NR_fgetxattr 193
+__SYSCALL(__NR_fgetxattr, syscall_not_implemented)
+#define __NR_listxattr 194
+__SYSCALL(__NR_listxattr, syscall_not_implemented)
+#define __NR_llistxattr 195
+__SYSCALL(__NR_llistxattr, syscall_not_implemented)
+#define __NR_flistxattr 196
+__SYSCALL(__NR_flistxattr, syscall_not_implemented)
+#define __NR_removexattr 197
+__SYSCALL(__NR_removexattr, syscall_not_implemented)
+#define __NR_lremovexattr 198
+__SYSCALL(__NR_lremovexattr, syscall_not_implemented)
+#define __NR_fremovexattr 199
+__SYSCALL(__NR_fremovexattr, syscall_not_implemented)
+#define __NR_tkill 200
+__SYSCALL(__NR_tkill, syscall_not_implemented)
+#define __NR_time 201
+__SYSCALL(__NR_time, sys_time)
+#define __NR_futex 202
+__SYSCALL(__NR_futex, syscall_not_implemented)
+#define __NR_sched_setaffinity 203
+__SYSCALL(__NR_sched_setaffinity, syscall_not_implemented)
+#define __NR_sched_getaffinity 204
+__SYSCALL(__NR_sched_getaffinity, syscall_not_implemented)
+#define __NR_set_thread_area 205
+__SYSCALL(__NR_set_thread_area, syscall_not_implemented)
+#define __NR_io_setup 206
+__SYSCALL(__NR_io_setup, syscall_not_implemented)
+#define __NR_io_destroy 207
+__SYSCALL(__NR_io_destroy, syscall_not_implemented)
+#define __NR_io_getevents 208
+__SYSCALL(__NR_io_getevents, syscall_not_implemented)
+#define __NR_io_submit 209
+__SYSCALL(__NR_io_submit, syscall_not_implemented)
+#define __NR_io_cancel 210
+__SYSCALL(__NR_io_cancel, syscall_not_implemented)
+#define __NR_get_thread_area 211
+__SYSCALL(__NR_get_thread_area, syscall_not_implemented)
+#define __NR_lookup_dcookie 212
+__SYSCALL(__NR_lookup_dcookie, syscall_not_implemented)
+#define __NR_epoll_create 213
+__SYSCALL(__NR_epoll_create, syscall_not_implemented)
+#define __NR_epoll_ctl_old 214
+__SYSCALL(__NR_epoll_ctl_old, syscall_not_implemented)
+#define __NR_epoll_wait_old 215
+__SYSCALL(__NR_epoll_wait_old, syscall_not_implemented)
+#define __NR_remap_file_pages 216
+__SYSCALL(__NR_remap_file_pages, syscall_not_implemented)
+#define __NR_getdents64 217
+__SYSCALL(__NR_getdents64, syscall_not_implemented)
+#define __NR_set_tid_address 218
+__SYSCALL(__NR_set_tid_address, syscall_not_implemented)
+#define __NR_restart_syscall 219
+__SYSCALL(__NR_restart_syscall, syscall_not_implemented)
+#define __NR_semtimedop 220
+__SYSCALL(__NR_semtimedop, syscall_not_implemented)
+#define __NR_fadvise64 221
+__SYSCALL(__NR_fadvise64, syscall_not_implemented)
+#define __NR_timer_create 222
+__SYSCALL(__NR_timer_create, syscall_not_implemented)
+#define __NR_timer_settime 223
+__SYSCALL(__NR_timer_settime, syscall_not_implemented)
+#define __NR_timer_gettime 224
+__SYSCALL(__NR_timer_gettime, syscall_not_implemented)
+#define __NR_timer_getoverrun 225
+__SYSCALL(__NR_timer_getoverrun, syscall_not_implemented)
+#define __NR_timer_delete 226
+__SYSCALL(__NR_timer_delete, syscall_not_implemented)
+#define __NR_clock_settime 227
+__SYSCALL(__NR_clock_settime, syscall_not_implemented)
+#define __NR_clock_gettime 228
+__SYSCALL(__NR_clock_gettime, syscall_not_implemented)
+#define __NR_clock_getres 229
+__SYSCALL(__NR_clock_getres, syscall_not_implemented)
+#define __NR_clock_nanosleep 230
+__SYSCALL(__NR_clock_nanosleep, syscall_not_implemented)
+#define __NR_exit_group 231
+__SYSCALL(__NR_exit_group, syscall_not_implemented)
+#define __NR_epoll_wait 232
+__SYSCALL(__NR_epoll_wait, syscall_not_implemented)
+#define __NR_epoll_ctl 233
+__SYSCALL(__NR_epoll_ctl, syscall_not_implemented)
+#define __NR_tgkill 234
+__SYSCALL(__NR_tgkill, syscall_not_implemented)
+#define __NR_utimes 235
+__SYSCALL(__NR_utimes, syscall_not_implemented)
+#define __NR_vserver 236
+__SYSCALL(__NR_vserver, syscall_not_implemented)
+#define __NR_mbind 237
+__SYSCALL(__NR_mbind, syscall_not_implemented)
+#define __NR_set_mempolicy 238
+__SYSCALL(__NR_set_mempolicy, syscall_not_implemented)
+#define __NR_get_mempolicy 239
+__SYSCALL(__NR_get_mempolicy, syscall_not_implemented)
+#define __NR_mq_open 240
+__SYSCALL(__NR_mq_open, syscall_not_implemented)
+#define __NR_mq_unlink 241
+__SYSCALL(__NR_mq_unlink, syscall_not_implemented)
+#define __NR_mq_timedsend 242
+__SYSCALL(__NR_mq_timedsend, syscall_not_implemented)
+#define __NR_mq_timedreceive 243
+__SYSCALL(__NR_mq_timedreceive, syscall_not_implemented)
+#define __NR_mq_notify 244
+__SYSCALL(__NR_mq_notify, syscall_not_implemented)
+#define __NR_mq_getsetattr 245
+__SYSCALL(__NR_mq_getsetattr, syscall_not_implemented)
+#define __NR_kexec_load 246
+__SYSCALL(__NR_kexec_load, syscall_not_implemented)
+#define __NR_waitid 247
+__SYSCALL(__NR_waitid, syscall_not_implemented)
+#define __NR_add_key 248
+__SYSCALL(__NR_add_key, syscall_not_implemented)
+#define __NR_request_key 249
+__SYSCALL(__NR_request_key, syscall_not_implemented)
+#define __NR_keyctl 250
+__SYSCALL(__NR_keyctl, syscall_not_implemented)
+#define __NR_ioprio_set 251
+__SYSCALL(__NR_ioprio_set, syscall_not_implemented)
+#define __NR_ioprio_get 252
+__SYSCALL(__NR_ioprio_get, syscall_not_implemented)
+#define __NR_inotify_init 253
+__SYSCALL(__NR_inotify_init, syscall_not_implemented)
+#define __NR_inotify_add_watch 254
+__SYSCALL(__NR_inotify_add_watch, syscall_not_implemented)
+#define __NR_inotify_rm_watch 255
+__SYSCALL(__NR_inotify_rm_watch, syscall_not_implemented)
+#define __NR_migrate_pages 256
+__SYSCALL(__NR_migrate_pages, syscall_not_implemented)
+#define __NR_openat 257
+__SYSCALL(__NR_openat, syscall_not_implemented)
+#define __NR_mkdirat 258
+__SYSCALL(__NR_mkdirat, syscall_not_implemented)
+#define __NR_mknodat 259
+__SYSCALL(__NR_mknodat, syscall_not_implemented)
+#define __NR_fchownat 260
+__SYSCALL(__NR_fchownat, syscall_not_implemented)
+#define __NR_futimesat 261
+__SYSCALL(__NR_futimesat, syscall_not_implemented)
+#define __NR_newfstatat 262
+__SYSCALL(__NR_newfstatat, syscall_not_implemented)
+#define __NR_unlinkat 263
+__SYSCALL(__NR_unlinkat, syscall_not_implemented)
+#define __NR_renameat 264
+__SYSCALL(__NR_renameat, syscall_not_implemented)
+#define __NR_linkat 265
+__SYSCALL(__NR_linkat, syscall_not_implemented)
+#define __NR_symlinkat 266
+__SYSCALL(__NR_symlinkat, syscall_not_implemented)
+#define __NR_readlinkat 267
+__SYSCALL(__NR_readlinkat, syscall_not_implemented)
+#define __NR_fchmodat 268
+__SYSCALL(__NR_fchmodat, syscall_not_implemented)
+#define __NR_faccessat 269
+__SYSCALL(__NR_faccessat, syscall_not_implemented)
+#define __NR_pselect6 270
+__SYSCALL(__NR_pselect6, syscall_not_implemented)
+#define __NR_ppoll 271
+__SYSCALL(__NR_ppoll, syscall_not_implemented)
+#define __NR_unshare 272
+__SYSCALL(__NR_unshare, syscall_not_implemented)
+#define __NR_set_robust_list 273
+__SYSCALL(__NR_set_robust_list, syscall_not_implemented)
+#define __NR_get_robust_list 274
+__SYSCALL(__NR_get_robust_list, syscall_not_implemented)
+#define __NR_splice 275
+__SYSCALL(__NR_splice, syscall_not_implemented)
+#define __NR_tee 276
+__SYSCALL(__NR_tee, syscall_not_implemented)
+#define __NR_sync_file_range 277
+__SYSCALL(__NR_sync_file_range, syscall_not_implemented)
+#define __NR_vmsplice 278
+__SYSCALL(__NR_vmsplice, syscall_not_implemented)
+#define __NR_move_pages 279
+__SYSCALL(__NR_move_pages, syscall_not_implemented)
+#define __NR_utimensat 280
+__SYSCALL(__NR_utimensat, syscall_not_implemented)
+#define __IGNORE_getcpu /* implemented as a vsyscall */
+#define __NR_epoll_pwait 281
+__SYSCALL(__NR_epoll_pwait, syscall_not_implemented)
+#define __NR_signalfd 282
+__SYSCALL(__NR_signalfd, syscall_not_implemented)
+#define __NR_timerfd 283
+__SYSCALL(__NR_timerfd, syscall_not_implemented)
+#define __NR_eventfd 284
+__SYSCALL(__NR_eventfd, syscall_not_implemented)
+#define __NR_fallocate 285
+__SYSCALL(__NR_fallocate, syscall_not_implemented)
+
+/**
+ * LWK specific system calls.
+ */
+#define __NR_pmem_add 286
+__SYSCALL(__NR_pmem_add, sys_pmem_add)
+#define __NR_pmem_update 287
+__SYSCALL(__NR_pmem_update, sys_pmem_update)
+#define __NR_pmem_query 288
+__SYSCALL(__NR_pmem_query, sys_pmem_query)
+#define __NR_pmem_alloc 289
+__SYSCALL(__NR_pmem_alloc, sys_pmem_alloc)
+
+#define __NR_aspace_get_myid 290
+__SYSCALL(__NR_aspace_get_myid, sys_aspace_get_myid)
+#define __NR_aspace_create 291
+__SYSCALL(__NR_aspace_create, sys_aspace_create)
+#define __NR_aspace_destroy 292
+__SYSCALL(__NR_aspace_destroy, sys_aspace_destroy)
+#define __NR_aspace_find_hole 293
+__SYSCALL(__NR_aspace_find_hole, sys_aspace_find_hole)
+#define __NR_aspace_add_region 294
+__SYSCALL(__NR_aspace_add_region, sys_aspace_add_region)
+#define __NR_aspace_del_region 295
+__SYSCALL(__NR_aspace_del_region, sys_aspace_del_region)
+#define __NR_aspace_map_pmem 296
+__SYSCALL(__NR_aspace_map_pmem, sys_aspace_map_pmem)
+#define __NR_aspace_unmap_pmem 297
+__SYSCALL(__NR_aspace_unmap_pmem, sys_aspace_unmap_pmem)
+#define __NR_aspace_smartmap 298
+__SYSCALL(__NR_aspace_smartmap, sys_aspace_smartmap)
+#define __NR_aspace_unsmartmap 299
+__SYSCALL(__NR_aspace_unsmartmap, sys_aspace_unsmartmap)
+#define __NR_aspace_dump2console 300
+__SYSCALL(__NR_aspace_dump2console, sys_aspace_dump2console)
+
+#define __NR_task_get_myid 301
+__SYSCALL(__NR_task_get_myid, sys_task_get_myid)
+#define __NR_task_create 302
+__SYSCALL(__NR_task_create, sys_task_create)
+#define __NR_task_exit 303
+__SYSCALL(__NR_task_exit, sys_task_exit)
+#define __NR_task_yield 304
+__SYSCALL(__NR_task_yield, sys_task_yield)
+
+#define __NR_elf_hwcap 305
+__SYSCALL(__NR_elf_hwcap, sys_elf_hwcap)
+
+#endif
--- /dev/null
+#ifndef _X86_64_USER_H
+#define _X86_64_USER_H
+
+#include <arch/types.h>
+#include <arch/page.h>
+/* Core file format: The core file is written in such a way that gdb
+ can understand it and provide useful information to the user.
+ There are quite a number of obstacles to being able to view the
+ contents of the floating point registers, and until these are
+ solved you will not be able to view the contents of them.
+ Actually, you can read in the core file and look at the contents of
+ the user struct to find out what the floating point registers
+ contain.
+
+ The actual file contents are as follows:
+ UPAGE: 1 page consisting of a user struct that tells gdb what is present
+ in the file. Directly after this is a copy of the task_struct, which
+ is currently not used by gdb, but it may come in useful at some point.
+ All of the registers are stored as part of the upage. The upage should
+ always be only one page.
+ DATA: The data area is stored. We use current->end_text to
+ current->brk to pick up all of the user variables, plus any memory
+ that may have been malloced. No attempt is made to determine if a page
+ is demand-zero or if a page is totally unused, we just cover the entire
+ range. All of the addresses are rounded in such a way that an integral
+ number of pages is written.
+ STACK: We need the stack information in order to get a meaningful
+ backtrace. We need to write the data from (esp) to
+ current->start_stack, so we round each of these off in order to be able
+ to write an integer number of pages.
+ The minimum core file size is 3 pages, or 12288 bytes. */
+
+/*
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ *
+ * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for
+ * interacting with the FXSR-format floating point environment. Floating
+ * point data can be accessed in the regular format in the usual manner,
+ * and both the standard and SIMD floating point data can be accessed via
+ * the new ptrace requests. In either case, changes to the FPU environment
+ * will be reflected in the task's state as expected.
+ *
+ * x86-64 support by Andi Kleen.
+ */
+
+/* This matches the 64bit FXSAVE format as defined by AMD. It is the same
+ as the 32bit format defined by Intel, except that the selector:offset pairs for
+ data and eip are replaced with flat 64bit pointers. */
+struct user_i387_struct {
+ unsigned short cwd;
+ unsigned short swd;
+ unsigned short twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */
+ unsigned short fop;
+ __u64 rip;
+ __u64 rdp;
+ __u32 mxcsr;
+ __u32 mxcsr_mask;
+ __u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
+ __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
+ __u32 padding[24];
+};
+
+/*
+ * Segment register layout in coredumps.
+ */
+struct user_regs_struct {
+ unsigned long r15,r14,r13,r12,rbp,rbx,r11,r10;
+ unsigned long r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax;
+ unsigned long rip,cs,eflags;
+ unsigned long rsp,ss;
+ unsigned long fs_base, gs_base;
+ unsigned long ds,es,fs,gs;
+};
+
+/* When the kernel dumps core, it starts by dumping the user struct -
+ this will be used by gdb to figure out where the data and stack segments
+ are within the file, and what virtual addresses to use. */
+struct user{
+/* We start with the registers, to mimic the way that "memory" is returned
+ from the ptrace(3,...) function. */
+ struct user_regs_struct regs; /* Where the registers are actually stored */
+/* ptrace does not yet supply these. Someday.... */
+ int u_fpvalid; /* True if math co-processor being used. */
+ /* for this mess. Not yet used. */
+ int pad0;
+ struct user_i387_struct i387; /* Math Co-processor registers. */
+/* The rest of this junk is to help gdb figure out what goes where */
+ unsigned long int u_tsize; /* Text segment size (pages). */
+ unsigned long int u_dsize; /* Data segment size (pages). */
+ unsigned long int u_ssize; /* Stack segment size (pages). */
+ unsigned long start_code; /* Starting virtual address of text. */
+ unsigned long start_stack; /* Starting virtual address of stack area.
+ This is actually the bottom of the stack,
+ the top of the stack is always found in the
+ esp register. */
+ long int signal; /* Signal that caused the core dump. */
+ int reserved; /* No longer used */
+ int pad1;
+ struct user_pt_regs * u_ar0; /* Used by gdb to help find the values for */
+ /* the registers. */
+ struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */
+ unsigned long magic; /* To uniquely identify a core file */
+ char u_comm[32]; /* User command that was responsible */
+ unsigned long u_debugreg[8];
+ unsigned long error_code; /* CPU error code or 0 */
+ unsigned long fault_address; /* CR3 or 0 */
+};
+#define NBPG PAGE_SIZE
+#define UPAGES 1
+#define HOST_TEXT_START_ADDR (u.start_code)
+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+
+#endif /* _X86_64_USER_H */
--- /dev/null
+#ifndef _ASM_X86_64_VSYSCALL_H_
+#define _ASM_X86_64_VSYSCALL_H_
+
+enum vsyscall_num {
+ __NR_vgettimeofday,
+ __NR_vtime,
+ __NR_vgetcpu,
+};
+
+#define VSYSCALL_START (-10UL << 20)
+#define VSYSCALL_SIZE 1024
+#define VSYSCALL_END (-2UL << 20)
+#define VSYSCALL_MAPPED_PAGES 1
+#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
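+
+/*
+ * Worked example (editorial, derived from the macros above): with
+ * VSYSCALL_START = -10UL << 20 = 0xffffffffff600000 and VSYSCALL_SIZE = 1024,
+ *   VSYSCALL_ADDR(__NR_vgettimeofday) = 0xffffffffff600000
+ *   VSYSCALL_ADDR(__NR_vtime)         = 0xffffffffff600400
+ *   VSYSCALL_ADDR(__NR_vgetcpu)       = 0xffffffffff600800
+ * i.e. each vsyscall occupies a fixed 1 KB slot in the vsyscall region.
+ */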
+
+#ifdef __KERNEL__
+#include <lwk/init.h>
+
+void __init vsyscall_map(void);
+void __init vsyscall_init(void);
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_X86_64_VSYSCALL_H_ */
--- /dev/null
+#ifndef _X86_64_XCALL_H
+#define _X86_64_XCALL_H
+
+#include <arch/ptrace.h>
+
+void arch_xcall_function_interrupt(struct pt_regs *regs, unsigned int vector);
+void arch_xcall_reschedule_interrupt(struct pt_regs *regs, unsigned int vector);
+
+#endif
--- /dev/null
+/* Copyright (c) 2007,2008 Sandia National Laboratories */
+
+#ifndef _LWK_ASPACE_H
+#define _LWK_ASPACE_H
+
+#include <lwk/types.h>
+#include <lwk/idspace.h>
+#include <arch/aspace.h>
+
+/**
+ * Valid user-space created address space IDs are in the interval
+ * [ASPACE_MIN_ID, ASPACE_MAX_ID].
+ */
+#define ASPACE_MIN_ID 0
+#define ASPACE_MAX_ID 4094
+
+/**
+ * The address space ID to use for the init_task.
+ * Put it at the top of the space to keep it out of the way.
+ */
+#define INIT_ASPACE_ID ASPACE_MAX_ID
+
+/**
+ * Protection and memory type flags.
+ */
+#define VM_READ (1 << 0)
+#define VM_WRITE (1 << 1)
+#define VM_EXEC (1 << 2)
+#define VM_NOCACHE (1 << 3)
+#define VM_WRITETHRU (1 << 4)
+#define VM_GLOBAL (1 << 5)
+#define VM_USER (1 << 6)
+#define VM_KERNEL (1 << 7)
+#define VM_HEAP (1 << 8)
+#define VM_SMARTMAP (1 << 9)
+typedef unsigned long vmflags_t;
+
+/**
+ * Page sizes.
+ */
+#define VM_PAGE_4KB (1 << 12)
+#define VM_PAGE_2MB (1 << 21)
+#define VM_PAGE_1GB (1 << 30)
+typedef unsigned long vmpagesize_t;
+
+/**
+ * Core address space management API.
+ * These are accessible from both kernel-space and user-space (via syscalls).
+ */
+extern int aspace_get_myid(id_t *id);
+extern int aspace_create(id_t id_request, const char *name, id_t *id);
+extern int aspace_destroy(id_t id);
+
+extern int aspace_find_hole(id_t id,
+ vaddr_t start_hint, size_t extent, size_t alignment,
+ vaddr_t *start);
+
+extern int aspace_add_region(id_t id,
+ vaddr_t start, size_t extent,
+ vmflags_t flags, vmpagesize_t pagesz,
+ const char *name);
+extern int aspace_del_region(id_t id, vaddr_t start, size_t extent);
+
+extern int aspace_map_pmem(id_t id,
+ paddr_t pmem, vaddr_t start, size_t extent);
+extern int aspace_unmap_pmem(id_t id, vaddr_t start, size_t extent);
+
+extern int aspace_smartmap(id_t src, id_t dst, vaddr_t start, size_t extent);
+extern int aspace_unsmartmap(id_t src, id_t dst);
+
+extern int aspace_dump2console(id_t id);
+
+/**
+ * Convenience functions defined in liblwk.
+ */
+extern int
+aspace_map_region(
+ id_t id,
+ vaddr_t start,
+ size_t extent,
+ vmflags_t flags,
+ vmpagesize_t pagesz,
+ const char * name,
+ paddr_t pmem
+);
+
+extern int
+aspace_map_region_anywhere(
+ id_t id,
+ vaddr_t * start,
+ size_t extent,
+ vmflags_t flags,
+ vmpagesize_t pagesz,
+ const char * name,
+ paddr_t pmem
+);
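+
+/*
+ * Illustrative usage sketch (editorial; error handling omitted, the requested
+ * ID value and the source of `pmem' are assumptions): create an address
+ * space, map 2 MB of already-allocated physical memory into it, and tear it
+ * back down.
+ *
+ *   id_t    id;
+ *   vaddr_t va;
+ *   paddr_t pmem = ...;   // physical memory obtained elsewhere
+ *
+ *   aspace_create(1, "example", &id);
+ *   aspace_map_region_anywhere(id, &va, VM_PAGE_2MB,
+ *                              VM_READ | VM_WRITE | VM_USER,
+ *                              VM_PAGE_2MB, "data", pmem);
+ *   aspace_dump2console(id);
+ *   aspace_destroy(id);
+ */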
+
+#ifdef __KERNEL__
+
+#include <lwk/spinlock.h>
+#include <lwk/list.h>
+#include <lwk/init.h>
+#include <arch/aspace.h>
+
+/**
+ * Address space structure.
+ */
+struct aspace {
+ spinlock_t lock; /* Must be held to access addr space */
+
+ id_t id; /* The address space's ID */
+ char name[16]; /* Address space's name */
+ struct hlist_node ht_link; /* Address space hash table linkage */
+ int refcnt; /* # of users of this address space */
+
+ struct list_head region_list; /* Sorted non-overlapping region list */
+
+ /**
+ * The address space's "Heap" region spans from:
+ * [heap_start, heap_end)
+ */
+ vaddr_t heap_start;
+ vaddr_t heap_end;
+
+ /**
+ * The traditional UNIX data segment is contained in the address
+ * space's heap region, ranging from:
+ * [heap_start, brk)
+ *
+ * GLIBC/malloc will call the sys_brk() system call when it wants to
+ * expand or shrink the data segment. The kernel verifies that the new
+ * brk value is legal before updating it. The data segment may not
+ * extend beyond the address space's heap region or overlap with
+ * any anonymous mmap regions (see mmap_brk below).
+ */
+ vaddr_t brk;
+
+ /**
+ * Memory for anonymous mmap() regions is allocated from the top of the
+ * address space's heap region, ranging from:
+ * [mmap_brk, heap_end)
+ *
+ * GLIBC makes at least one mmap() call during pre-main app startup
+ * to allocate some "anonymous" memory (i.e., normal memory, not a
+ * file mapping). mmap_brk starts out set to heap_end and grows down
+ * as anonymous mmap() calls are made. The kernel takes care to prevent
+ * mmap_brk from extending into the UNIX data segment (see brk above).
+ */
+ vaddr_t mmap_brk;
+
+ /**
+ * Architecture specific address space data.
+ */
+ struct arch_aspace arch;
+};
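+
+/*
+ * Illustrative sketch (editorial, not the kernel's actual sys_brk): the
+ * heap/brk/mmap_brk comments above imply a validity check roughly like:
+ *
+ *   static inline int brk_request_is_valid(const struct aspace *as,
+ *                                          vaddr_t new_brk)
+ *   {
+ *           // The UNIX data segment must stay inside the heap region and
+ *           // must not run into the anonymous mmap area that grows down
+ *           // from heap_end.
+ *           return (new_brk >= as->heap_start) && (new_brk <= as->mmap_brk);
+ *   }
+ */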
+
+/**
+ * Valid address space IDs are in the interval [__ASPACE_MIN_ID, __ASPACE_MAX_ID].
+ */
+#define __ASPACE_MIN_ID ASPACE_MIN_ID
+#define __ASPACE_MAX_ID (ASPACE_MAX_ID + 1) /* +1 for KERNEL_ASPACE_ID */
+
+/**
+ * ID of the address space used by kernel threads.
+ */
+#define KERNEL_ASPACE_ID (ASPACE_MAX_ID + 1)
+
+/**
+ * Kernel-only unlocked versions of the core address space management API.
+ * These assume that the aspace objects passed in have already been locked.
+ * The caller must unlock the aspaces. The caller must also ensure that
+ * interrupts are disabled before calling these functions.
+ */
+extern int __aspace_find_hole(struct aspace *aspace,
+ vaddr_t start_hint, size_t extent, size_t alignment,
+ vaddr_t *start);
+extern int __aspace_add_region(struct aspace *aspace,
+ vaddr_t start, size_t extent,
+ vmflags_t flags, vmpagesize_t pagesz,
+ const char *name);
+extern int __aspace_del_region(struct aspace *aspace,
+ vaddr_t start, size_t extent);
+extern int __aspace_map_pmem(struct aspace *aspace,
+ paddr_t pmem, vaddr_t start, size_t extent);
+extern int __aspace_unmap_pmem(struct aspace *aspace,
+ vaddr_t start, size_t extent);
+extern int __aspace_smartmap(struct aspace *src, struct aspace *dst,
+ vaddr_t start, size_t extent);
+extern int __aspace_unsmartmap(struct aspace *src, struct aspace *dst);
+
+/**
+ * Kernel-only address space management API.
+ * These are not accessible from user-space.
+ */
+extern int __init aspace_subsys_init(void);
+extern struct aspace *aspace_acquire(id_t id);
+extern void aspace_release(struct aspace *aspace);
+
+/**
+ * Architecture specific address space functions.
+ * Each architecture port must provide these.
+ */
+extern int arch_aspace_create(struct aspace *aspace);
+extern void arch_aspace_destroy(struct aspace *aspace);
+extern void arch_aspace_activate(struct aspace *aspace);
+extern int arch_aspace_map_page(struct aspace * aspace,
+ vaddr_t start, paddr_t paddr,
+ vmflags_t flags, vmpagesize_t pagesz);
+extern void arch_aspace_unmap_page(struct aspace * aspace,
+ vaddr_t start, vmpagesize_t pagesz);
+extern int arch_aspace_smartmap(struct aspace *src, struct aspace *dst,
+ vaddr_t start, size_t extent);
+extern int arch_aspace_unsmartmap(struct aspace *src, struct aspace *dst,
+ vaddr_t start, size_t extent);
+
+/**
+ * System call handlers.
+ */
+extern int sys_aspace_get_myid(id_t __user *id);
+extern int sys_aspace_create(id_t id_request, const char __user *name,
+ id_t __user *id);
+extern int sys_aspace_destroy(id_t id);
+extern int sys_aspace_find_hole(id_t id, vaddr_t start_hint, size_t extent,
+ size_t alignment, vaddr_t __user *start);
+extern int sys_aspace_add_region(id_t id,
+ vaddr_t start, size_t extent,
+ vmflags_t flags, vmpagesize_t pagesz,
+ const char __user *name);
+extern int sys_aspace_del_region(id_t id, vaddr_t start, size_t extent);
+extern int sys_aspace_map_pmem(id_t id,
+ paddr_t pmem, vaddr_t start, size_t extent);
+extern int sys_aspace_unmap_pmem(id_t id, vaddr_t start, size_t extent);
+extern int sys_aspace_smartmap(id_t src, id_t dst,
+ vaddr_t start, size_t extent);
+extern int sys_aspace_unsmartmap(id_t src, id_t dst);
+extern int sys_aspace_dump2console(id_t id);
+
+#endif
+#endif
--- /dev/null
+#ifndef _LWK_AUXVEC_H
+#define _LWK_AUXVEC_H
+
+/**
+ * Auxiliary info table entry. A table of these entries gets placed at the
+ * top of a new task's stack so user-space can figure out things that are
+ * difficult or impossible to determine otherwise (e.g., its base load
+ * address).
+ */
+struct aux_ent {
+ unsigned long id;
+ unsigned long val;
+};
+
+#include <arch/auxvec.h>
+
+/* Symbolic values for the entries in the auxiliary table
+ put on the initial stack */
+#define AT_NULL 0 /* end of vector */
+#define AT_IGNORE 1 /* entry should be ignored */
+#define AT_EXECFD 2 /* file descriptor of program */
+#define AT_PHDR 3 /* program headers for program */
+#define AT_PHENT 4 /* size of program header entry */
+#define AT_PHNUM 5 /* number of program headers */
+#define AT_PAGESZ 6 /* system page size */
+#define AT_BASE 7 /* base address of interpreter */
+#define AT_FLAGS 8 /* flags */
+#define AT_ENTRY 9 /* entry point of program */
+#define AT_NOTELF 10 /* program is not ELF */
+#define AT_UID 11 /* real uid */
+#define AT_EUID 12 /* effective uid */
+#define AT_GID 13 /* real gid */
+#define AT_EGID 14 /* effective gid */
+#define AT_PLATFORM 15 /* string identifying CPU for optimizations */
+#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */
+#define AT_CLKTCK 17 /* frequency at which times() increments */
+
+#define AT_SECURE 23 /* secure mode boolean */
+
+#define AT_ENTRIES 22 /* Number of entries in the auxiliary table */
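+
+/*
+ * Illustrative sketch (editorial): how a user-space runtime might scan the
+ * auxiliary table the kernel places on the initial stack. The helper name is
+ * an assumption; the table is terminated by an AT_NULL entry.
+ *
+ *   static unsigned long get_auxval(const struct aux_ent *auxv,
+ *                                   unsigned long id)
+ *   {
+ *           for (; auxv->id != AT_NULL; auxv++)
+ *                   if (auxv->id == id)
+ *                           return auxv->val;
+ *           return 0;
+ *   }
+ */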
+
+#endif /* _LWK_AUXVEC_H */
--- /dev/null
+#ifndef _LWK_BITMAP_H
+#define _LWK_BITMAP_H
+
+#ifndef __ASSEMBLY__
+
+#include <lwk/types.h>
+#include <lwk/bitops.h>
+#include <lwk/string.h>
+
+/*
+ * bitmaps provide bit arrays that consume one or more unsigned
+ * longs. The bitmap interface and available operations are listed
+ * here, in bitmap.h
+ *
+ * Function implementations generic to all architectures are in
+ * lib/bitmap.c. Function implementations that are architecture-
+ * specific are in the various include/arch-<arch>/bitops.h headers
+ * and other arch/<arch> specific files.
+ *
+ * See lib/bitmap.c for more details.
+ */
+
+/*
+ * The available bitmap operations and their rough meaning in the
+ * case that the bitmap is a single unsigned long are thus:
+ *
+ * bitmap_zero(dst, nbits) *dst = 0UL
+ * bitmap_fill(dst, nbits) *dst = ~0UL
+ * bitmap_copy(dst, src, nbits) *dst = *src
+ * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
+ * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
+ * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
+ * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
+ * bitmap_complement(dst, src, nbits) *dst = ~(*src)
+ * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
+ * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
+ * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2?
+ * bitmap_empty(src, nbits) Are all bits zero in *src?
+ * bitmap_full(src, nbits) Are all bits set in *src?
+ * bitmap_weight(src, nbits) Hamming Weight: number of set bits
+ * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
+ * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
+ * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
+ * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
+ * bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf
+ * bitmap_parse(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
+ * bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf
+ * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from list
+ * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
+ * bitmap_release_region(bitmap, pos, order) Free specified bit region
+ * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
+ */
+
+/*
+ * Also the following operations in asm/bitops.h apply to bitmaps.
+ *
+ * set_bit(bit, addr) *addr |= bit
+ * clear_bit(bit, addr) *addr &= ~bit
+ * change_bit(bit, addr) *addr ^= bit
+ * test_bit(bit, addr) Is bit set in *addr?
+ * test_and_set_bit(bit, addr) Set bit and return old value
+ * test_and_clear_bit(bit, addr) Clear bit and return old value
+ * test_and_change_bit(bit, addr) Change bit and return old value
+ * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
+ * find_first_bit(addr, nbits) Position first set bit in *addr
+ * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
+ * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
+ */
+
+/*
+ * The DECLARE_BITMAP(name,bits) macro, in lwk/types.h, can be used
+ * to declare an array named 'name' of just enough unsigned longs to
+ * contain all bit positions from 0 to 'bits' - 1.
+ */
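+
+/*
+ * Minimal usage sketch (editorial; the 64-bit mask and the cpu numbers are
+ * arbitrary examples):
+ *
+ *   DECLARE_BITMAP(cpu_mask, 64);
+ *
+ *   bitmap_zero(cpu_mask, 64);
+ *   set_bit(3, cpu_mask);
+ *   set_bit(5, cpu_mask);
+ *   if (test_bit(3, cpu_mask)) {
+ *           int n = bitmap_weight(cpu_mask, 64);   // n == 2 here
+ *           ...
+ *   }
+ */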
+
+/*
+ * lib/bitmap.c provides these functions:
+ */
+
+extern int __bitmap_empty(const unsigned long *bitmap, int bits);
+extern int __bitmap_full(const unsigned long *bitmap, int bits);
+extern int __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
+ int bits);
+extern void __bitmap_shift_right(unsigned long *dst,
+ const unsigned long *src, int shift, int bits);
+extern void __bitmap_shift_left(unsigned long *dst,
+ const unsigned long *src, int shift, int bits);
+extern void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+extern void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+extern int __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+extern int __bitmap_subset(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+extern int __bitmap_weight(const unsigned long *bitmap, int bits);
+
+extern int bitmap_scnprintf(char *buf, unsigned int len,
+ const unsigned long *src, int nbits);
+extern int bitmap_parse(const char __user *ubuf, unsigned int ulen,
+ unsigned long *dst, int nbits);
+extern int bitmap_scnlistprintf(char *buf, unsigned int len,
+ const unsigned long *src, int nbits);
+extern int bitmap_parselist(const char *buf, unsigned long *maskp,
+ int nmaskbits);
+extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
+ const unsigned long *old, const unsigned long *new, int bits);
+extern int bitmap_bitremap(int oldbit,
+ const unsigned long *old, const unsigned long *new, int bits);
+extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
+extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
+extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
+
+#define BITMAP_LAST_WORD_MASK(nbits) \
+( \
+ ((nbits) % BITS_PER_LONG) ? \
+ (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
+)
+
+static inline void bitmap_zero(unsigned long *dst, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ *dst = 0UL;
+ else {
+ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0, len);
+ }
+}
+
+static inline void bitmap_fill(unsigned long *dst, int nbits)
+{
+ size_t nlongs = BITS_TO_LONGS(nbits);
+ if (nlongs > 1) {
+ int len = (nlongs - 1) * sizeof(unsigned long);
+ memset(dst, 0xff, len);
+ }
+ dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
+}
+
+static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
+ int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ *dst = *src;
+ else {
+ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memcpy(dst, src, len);
+ }
+}
+
+static inline void bitmap_and(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ *dst = *src1 & *src2;
+ else
+ __bitmap_and(dst, src1, src2, nbits);
+}
+
+static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ *dst = *src1 | *src2;
+ else
+ __bitmap_or(dst, src1, src2, nbits);
+}
+
+static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ *dst = *src1 ^ *src2;
+ else
+ __bitmap_xor(dst, src1, src2, nbits);
+}
+
+static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ *dst = *src1 & ~(*src2);
+ else
+ __bitmap_andnot(dst, src1, src2, nbits);
+}
+
+static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
+ int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ *dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
+ else
+ __bitmap_complement(dst, src, nbits);
+}
+
+static inline int bitmap_equal(const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
+ else
+ return __bitmap_equal(src1, src2, nbits);
+}
+
+static inline int bitmap_intersects(const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
+ else
+ return __bitmap_intersects(src1, src2, nbits);
+}
+
+static inline int bitmap_subset(const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
+ else
+ return __bitmap_subset(src1, src2, nbits);
+}
+
+static inline int bitmap_empty(const unsigned long *src, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
+ else
+ return __bitmap_empty(src, nbits);
+}
+
+static inline int bitmap_full(const unsigned long *src, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
+ else
+ return __bitmap_full(src, nbits);
+}
+
+static inline int bitmap_weight(const unsigned long *src, int nbits)
+{
+ return __bitmap_weight(src, nbits);
+}
+
+static inline void bitmap_shift_right(unsigned long *dst,
+ const unsigned long *src, int n, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ *dst = *src >> n;
+ else
+ __bitmap_shift_right(dst, src, n, nbits);
+}
+
+static inline void bitmap_shift_left(unsigned long *dst,
+ const unsigned long *src, int n, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ *dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits);
+ else
+ __bitmap_shift_left(dst, src, n, nbits);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _LWK_BITMAP_H */
--- /dev/null
+#ifndef _LWK_BITOPS_H
+#define _LWK_BITOPS_H
+#include <arch/types.h>
+
+/*
+ * Include this here because some architectures need generic_ffs/fls in
+ * scope
+ */
+#include <arch/bitops.h>
+
+static __inline__ int get_bitmask_order(unsigned int count)
+{
+ int order;
+
+ order = fls(count);
+ return order; /* We could be slightly more clever with -1 here... */
+}
+
+static __inline__ int get_count_order(unsigned int count)
+{
+ int order;
+
+ order = fls(count) - 1;
+ if (count & (count - 1))
+ order++;
+ return order;
+}
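+
+/*
+ * Editorial worked example: get_count_order() returns ceil(log2(count)),
+ * e.g. get_count_order(1) == 0, get_count_order(8) == 3, and
+ * get_count_order(9) == 4 (fls(9) - 1 == 3, plus one because 9 is not a
+ * power of two).
+ */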
+
+static inline unsigned long hweight_long(unsigned long w)
+{
+ return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+}
+
+/*
+ * rol32 - rotate a 32-bit value left
+ *
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u32 rol32(__u32 word, unsigned int shift)
+{
+ return (word << shift) | (word >> (32 - shift));
+}
+
+/*
+ * ror32 - rotate a 32-bit value right
+ *
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u32 ror32(__u32 word, unsigned int shift)
+{
+ return (word >> shift) | (word << (32 - shift));
+}
+
+static inline unsigned fls_long(unsigned long l)
+{
+ if (sizeof(l) == 4)
+ return fls(l);
+ return fls64(l);
+}
+
+#endif
--- /dev/null
+#ifndef _LWK_BOOTMEM_H
+#define _LWK_BOOTMEM_H
+
+#include <lwk/list.h>
+#include <lwk/init.h>
+
+/**
+ * Bootmem control structure.
+ *
+ * The node_bootmem_map field is a map pointer - the bits represent
+ * all physical memory pages (including holes) for the region represented
+ * by the enclosing bootmem_data structure.
+ */
+typedef struct bootmem_data {
+ unsigned long node_boot_start;
+ unsigned long node_low_pfn;
+ void *node_bootmem_map; // bitmap, one bit per page
+ unsigned long last_offset;
+ unsigned long last_pos;
+ unsigned long last_success; // previous allocation point,
+ // used to speed up search.
+ struct list_head list;
+} bootmem_data_t;
+
+extern unsigned long __init bootmem_bootmap_pages(unsigned long pages);
+extern unsigned long __init init_bootmem(unsigned long start,
+ unsigned long pages);
+extern void __init reserve_bootmem(unsigned long addr, unsigned long size);
+extern void * __init alloc_bootmem(unsigned long size);
+extern void * __init alloc_bootmem_aligned(unsigned long size,
+ unsigned long alignment);
+extern void __init free_bootmem(unsigned long addr, unsigned long size);
+extern void __init free_all_bootmem(void);
+
+extern void __init mem_subsys_init(void);
+extern void __init arch_memsys_init(size_t kmem_size);
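+
+/*
+ * Typical early-boot flow (editorial sketch, assuming the Linux-style bootmem
+ * semantics this allocator derives from; names and sizes are placeholders):
+ *
+ *   init_bootmem(start_pfn, num_pages);         // map created, pages reserved
+ *   free_bootmem(ram_start, ram_size);          // release usable RAM ranges
+ *   reserve_bootmem(kernel_start, kernel_size); // protect the kernel image
+ *   buf = alloc_bootmem(4096);                  // early allocation
+ *   ...
+ *   free_all_bootmem();                         // hand leftover pages onward
+ */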
+
+#endif
--- /dev/null
+/* Copyright (c) 2007, Sandia National Laboratories */
+
+#ifndef _LWK_BUDDY_H
+#define _LWK_BUDDY_H
+
+#include <lwk/list.h>
+
+/**
+ * This structure stores the state of a buddy system memory allocator object.
+ */
+struct buddy_mempool {
+ unsigned long base_addr; /* base address of the memory pool */
+ unsigned long pool_order; /* size of memory pool = 2^pool_order */
+ unsigned long min_order; /* minimum allocatable block size */
+
+ unsigned long num_blocks; /* number of bits in tag_bits */
+ unsigned long *tag_bits; /* one bit for each 2^min_order block
+ * 0 = block is allocated
+ * 1 = block is available
+ */
+
+ struct list_head *avail; /* one free list for each block size,
+ * indexed by block order:
+ * avail[i] = free list of 2^i blocks
+ */
+};
+
+struct buddy_mempool *
+buddy_init(
+ unsigned long base_addr,
+ unsigned long pool_order,
+ unsigned long min_order
+);
+
+void *buddy_alloc(struct buddy_mempool *mp, unsigned long order);
+void buddy_free(struct buddy_mempool *mp, void *addr, unsigned long order);
+
+void buddy_dump_mempool(struct buddy_mempool *mp);
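+
+/*
+ * Illustrative usage sketch (editorial; the numbers are placeholders): manage
+ * a 1 MB pool (2^20 bytes) with a 4 KB (2^12 byte) minimum block size.
+ *
+ *   struct buddy_mempool *mp = buddy_init(pool_base, 20, 12);
+ *   void *block = buddy_alloc(mp, 13);   // one 8 KB (2^13 byte) block
+ *   ...
+ *   buddy_free(mp, block, 13);           // order should match the allocation
+ *   buddy_dump_mempool(mp);
+ */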
+
+#endif
--- /dev/null
+unifdef-y += generic.h swab.h
+header-y += big_endian.h little_endian.h
--- /dev/null
+#ifndef _LWK_BYTEORDER_BIG_ENDIAN_H
+#define _LWK_BYTEORDER_BIG_ENDIAN_H
+
+#ifndef __BIG_ENDIAN
+#define __BIG_ENDIAN 4321
+#endif
+#ifndef __BIG_ENDIAN_BITFIELD
+#define __BIG_ENDIAN_BITFIELD
+#endif
+
+#include <lwk/types.h>
+#include <lwk/byteorder/swab.h>
+
+#define __constant_htonl(x) ((__force __be32)(__u32)(x))
+#define __constant_ntohl(x) ((__force __u32)(__be32)(x))
+#define __constant_htons(x) ((__force __be16)(__u16)(x))
+#define __constant_ntohs(x) ((__force __u16)(__be16)(x))
+#define __constant_cpu_to_le64(x) ((__force __le64)___constant_swab64((x)))
+#define __constant_le64_to_cpu(x) ___constant_swab64((__force __u64)(__le64)(x))
+#define __constant_cpu_to_le32(x) ((__force __le32)___constant_swab32((x)))
+#define __constant_le32_to_cpu(x) ___constant_swab32((__force __u32)(__le32)(x))
+#define __constant_cpu_to_le16(x) ((__force __le16)___constant_swab16((x)))
+#define __constant_le16_to_cpu(x) ___constant_swab16((__force __u16)(__le16)(x))
+#define __constant_cpu_to_be64(x) ((__force __be64)(__u64)(x))
+#define __constant_be64_to_cpu(x) ((__force __u64)(__be64)(x))
+#define __constant_cpu_to_be32(x) ((__force __be32)(__u32)(x))
+#define __constant_be32_to_cpu(x) ((__force __u32)(__be32)(x))
+#define __constant_cpu_to_be16(x) ((__force __be16)(__u16)(x))
+#define __constant_be16_to_cpu(x) ((__force __u16)(__be16)(x))
+#define __cpu_to_le64(x) ((__force __le64)__swab64((x)))
+#define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x))
+#define __cpu_to_le32(x) ((__force __le32)__swab32((x)))
+#define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
+#define __cpu_to_le16(x) ((__force __le16)__swab16((x)))
+#define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x))
+#define __cpu_to_be64(x) ((__force __be64)(__u64)(x))
+#define __be64_to_cpu(x) ((__force __u64)(__be64)(x))
+#define __cpu_to_be32(x) ((__force __be32)(__u32)(x))
+#define __be32_to_cpu(x) ((__force __u32)(__be32)(x))
+#define __cpu_to_be16(x) ((__force __be16)(__u16)(x))
+#define __be16_to_cpu(x) ((__force __u16)(__be16)(x))
+
+static inline __le64 __cpu_to_le64p(const __u64 *p)
+{
+ return (__force __le64)__swab64p(p);
+}
+static inline __u64 __le64_to_cpup(const __le64 *p)
+{
+ return __swab64p((__u64 *)p);
+}
+static inline __le32 __cpu_to_le32p(const __u32 *p)
+{
+ return (__force __le32)__swab32p(p);
+}
+static inline __u32 __le32_to_cpup(const __le32 *p)
+{
+ return __swab32p((__u32 *)p);
+}
+static inline __le16 __cpu_to_le16p(const __u16 *p)
+{
+ return (__force __le16)__swab16p(p);
+}
+static inline __u16 __le16_to_cpup(const __le16 *p)
+{
+ return __swab16p((__u16 *)p);
+}
+static inline __be64 __cpu_to_be64p(const __u64 *p)
+{
+ return (__force __be64)*p;
+}
+static inline __u64 __be64_to_cpup(const __be64 *p)
+{
+ return (__force __u64)*p;
+}
+static inline __be32 __cpu_to_be32p(const __u32 *p)
+{
+ return (__force __be32)*p;
+}
+static inline __u32 __be32_to_cpup(const __be32 *p)
+{
+ return (__force __u32)*p;
+}
+static inline __be16 __cpu_to_be16p(const __u16 *p)
+{
+ return (__force __be16)*p;
+}
+static inline __u16 __be16_to_cpup(const __be16 *p)
+{
+ return (__force __u16)*p;
+}
+#define __cpu_to_le64s(x) __swab64s((x))
+#define __le64_to_cpus(x) __swab64s((x))
+#define __cpu_to_le32s(x) __swab32s((x))
+#define __le32_to_cpus(x) __swab32s((x))
+#define __cpu_to_le16s(x) __swab16s((x))
+#define __le16_to_cpus(x) __swab16s((x))
+#define __cpu_to_be64s(x) do {} while (0)
+#define __be64_to_cpus(x) do {} while (0)
+#define __cpu_to_be32s(x) do {} while (0)
+#define __be32_to_cpus(x) do {} while (0)
+#define __cpu_to_be16s(x) do {} while (0)
+#define __be16_to_cpus(x) do {} while (0)
+
+#include <lwk/byteorder/generic.h>
+
+#endif /* _LWK_BYTEORDER_BIG_ENDIAN_H */
--- /dev/null
+#ifndef _LWK_BYTEORDER_GENERIC_H
+#define _LWK_BYTEORDER_GENERIC_H
+
+/*
+ * lwk/byteorder/generic.h
+ * Generic Byte-reordering support
+ *
+ * The "... p" macros, like le64_to_cpup, can be used with pointers
+ * to unaligned data, but there will be a performance penalty on
+ * some architectures. Use get_unaligned for unaligned data.
+ *
+ * Francois-Rene Rideau <fare@tunes.org> 19970707
+ * gathered all the good ideas from all asm-foo/byteorder.h into one file,
+ * cleaned them up.
+ * I hope it is compliant with non-GCC compilers.
+ * I decided to put __BYTEORDER_HAS_U64__ in byteorder.h,
+ * because I wasn't sure it would be ok to put it in types.h
+ * Upgraded it to 2.1.43
+ * Francois-Rene Rideau <fare@tunes.org> 19971012
+ * Upgraded it to 2.1.57
+ * to please Linus T., replaced huge #ifdef's between little/big endian
+ * by nestedly #include'd files.
+ * Francois-Rene Rideau <fare@tunes.org> 19971205
+ * Made it to 2.1.71; now a facelift:
+ * Put files under include/linux/byteorder/
+ * Split swab from generic support.
+ *
+ * TODO:
+ * = Regular kernel maintainers could also replace all these manual
+ * byteswap macros that remain, disseminated among drivers,
+ * after some grep or the sources...
+ * = Linus might want to rename all these macros and files to fit his taste,
+ * to fit his personal naming scheme.
+ * = it seems that a few drivers would also appreciate
+ * nybble swapping support...
+ * = every architecture could add their byteswap macro in asm/byteorder.h
+ * see how some architectures already do (i386, alpha, ppc, etc)
+ * = cpu_to_beXX and beXX_to_cpu might some day need to be well
+ * distinguished throughout the kernel. This is not the case currently,
+ * since little endian, big endian, and pdp endian machines needn't it.
+ * But this might be the case for, say, a port of Linux to 20/21 bit
+ * architectures (and F21 Linux addict around?).
+ */
+
+/*
+ * The following macros are to be defined by <asm/byteorder.h>:
+ *
+ * Conversion of long and short int between network and host format
+ * ntohl(__u32 x)
+ * ntohs(__u16 x)
+ * htonl(__u32 x)
+ * htons(__u16 x)
+ * It seems that some programs (which? where? or perhaps a standard? POSIX?)
+ * might like the above to be functions, not macros (why?).
+ * if that's true, then detect them, and take measures.
+ * Anyway, the measure is: define only ___ntohl as a macro instead,
+ * and in a separate file, have
+ * unsigned long inline ntohl(x){return ___ntohl(x);}
+ *
+ * The same for constant arguments
+ * __constant_ntohl(__u32 x)
+ * __constant_ntohs(__u16 x)
+ * __constant_htonl(__u32 x)
+ * __constant_htons(__u16 x)
+ *
+ * Conversion of XX-bit integers (16- 32- or 64-)
+ * between native CPU format and little/big endian format
+ * 64-bit stuff only defined for proper architectures
+ * cpu_to_[bl]eXX(__uXX x)
+ * [bl]eXX_to_cpu(__uXX x)
+ *
+ * The same, but takes a pointer to the value to convert
+ * cpu_to_[bl]eXXp(__uXX x)
+ * [bl]eXX_to_cpup(__uXX x)
+ *
+ * The same, but change in situ
+ * cpu_to_[bl]eXXs(__uXX x)
+ * [bl]eXX_to_cpus(__uXX x)
+ *
+ * See asm-foo/byteorder.h for examples of how to provide
+ * architecture-optimized versions
+ *
+ */
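+
+/*
+ * A minimal usage sketch (struct wire_hdr and its fields are hypothetical,
+ * shown only to illustrate the macros):
+ *
+ *     struct wire_hdr {
+ *             __le32 magic;           // always little endian on the wire
+ *             __be16 port;            // always big endian (network order)
+ *     };
+ *
+ *     static void fill_hdr(struct wire_hdr *hdr, __u32 magic, __u16 port)
+ *     {
+ *             hdr->magic = cpu_to_le32(magic);    // CPU order -> LE
+ *             hdr->port  = cpu_to_be16(port);     // CPU order -> BE
+ *     }
+ *
+ *     static __u32 read_magic(const struct wire_hdr *hdr)
+ *     {
+ *             return le32_to_cpu(hdr->magic);     // LE -> CPU order
+ *     }
+ *
+ * On a little endian CPU the leXX conversions compile away; on a big
+ * endian CPU the beXX conversions compile away.
+ */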
+
+
+#if defined(__KERNEL__)
+/*
+ * inside the kernel, we can use nicknames;
+ * outside of it, we must avoid POSIX namespace pollution...
+ */
+#define cpu_to_le64 __cpu_to_le64
+#define le64_to_cpu __le64_to_cpu
+#define cpu_to_le32 __cpu_to_le32
+#define le32_to_cpu __le32_to_cpu
+#define cpu_to_le16 __cpu_to_le16
+#define le16_to_cpu __le16_to_cpu
+#define cpu_to_be64 __cpu_to_be64
+#define be64_to_cpu __be64_to_cpu
+#define cpu_to_be32 __cpu_to_be32
+#define be32_to_cpu __be32_to_cpu
+#define cpu_to_be16 __cpu_to_be16
+#define be16_to_cpu __be16_to_cpu
+#define cpu_to_le64p __cpu_to_le64p
+#define le64_to_cpup __le64_to_cpup
+#define cpu_to_le32p __cpu_to_le32p
+#define le32_to_cpup __le32_to_cpup
+#define cpu_to_le16p __cpu_to_le16p
+#define le16_to_cpup __le16_to_cpup
+#define cpu_to_be64p __cpu_to_be64p
+#define be64_to_cpup __be64_to_cpup
+#define cpu_to_be32p __cpu_to_be32p
+#define be32_to_cpup __be32_to_cpup
+#define cpu_to_be16p __cpu_to_be16p
+#define be16_to_cpup __be16_to_cpup
+#define cpu_to_le64s __cpu_to_le64s
+#define le64_to_cpus __le64_to_cpus
+#define cpu_to_le32s __cpu_to_le32s
+#define le32_to_cpus __le32_to_cpus
+#define cpu_to_le16s __cpu_to_le16s
+#define le16_to_cpus __le16_to_cpus
+#define cpu_to_be64s __cpu_to_be64s
+#define be64_to_cpus __be64_to_cpus
+#define cpu_to_be32s __cpu_to_be32s
+#define be32_to_cpus __be32_to_cpus
+#define cpu_to_be16s __cpu_to_be16s
+#define be16_to_cpus __be16_to_cpus
+#endif
+
+
+#if defined(__KERNEL__)
+/*
+ * Handle ntohl and friends. These have various compatibility
+ * issues - like we want to give the prototype even though we
+ * also have a macro for them in case some strange program
+ * wants to take the address of the thing or something..
+ *
+ * Note that these used to return a "long" in libc5, even though
+ * long is often 64-bit these days.. Thus the casts.
+ *
+ * They have to be macros in order to do the constant folding
+ * correctly - if the argument is passed into an inline function
+ * it is no longer constant according to gcc.
+ */
+
+#undef ntohl
+#undef ntohs
+#undef htonl
+#undef htons
+
+/*
+ * Do the prototypes. Somebody might want to take the
+ * address or some such sick thing..
+ */
+extern __u32 ntohl(__be32);
+extern __be32 htonl(__u32);
+extern __u16 ntohs(__be16);
+extern __be16 htons(__u16);
+
+#if defined(__GNUC__) && defined(__OPTIMIZE__)
+
+#define ___htonl(x) __cpu_to_be32(x)
+#define ___htons(x) __cpu_to_be16(x)
+#define ___ntohl(x) __be32_to_cpu(x)
+#define ___ntohs(x) __be16_to_cpu(x)
+
+#define htonl(x) ___htonl(x)
+#define ntohl(x) ___ntohl(x)
+#define htons(x) ___htons(x)
+#define ntohs(x) ___ntohs(x)
+
+#endif /* OPTIMIZE */
+
+#endif /* KERNEL */
+
+
+#endif /* _LWK_BYTEORDER_GENERIC_H */
--- /dev/null
+#ifndef _LWK_BYTEORDER_LITTLE_ENDIAN_H
+#define _LWK_BYTEORDER_LITTLE_ENDIAN_H
+
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN 1234
+#endif
+#ifndef __LITTLE_ENDIAN_BITFIELD
+#define __LITTLE_ENDIAN_BITFIELD
+#endif
+
+#include <lwk/types.h>
+#include <lwk/byteorder/swab.h>
+
+#define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
+#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
+#define __constant_htons(x) ((__force __be16)___constant_swab16((x)))
+#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x))
+#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
+#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
+#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
+#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x))
+#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x))
+#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x)))
+#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x))
+#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x)))
+#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x))
+#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x)))
+#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x))
+#define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
+#define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
+#define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
+#define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#define __cpu_to_le16(x) ((__force __le16)(__u16)(x))
+#define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
+#define __cpu_to_be64(x) ((__force __be64)__swab64((x)))
+#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
+#define __cpu_to_be32(x) ((__force __be32)__swab32((x)))
+#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
+#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
+#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
+
+static inline __le64 __cpu_to_le64p(const __u64 *p)
+{
+ return (__force __le64)*p;
+}
+static inline __u64 __le64_to_cpup(const __le64 *p)
+{
+ return (__force __u64)*p;
+}
+static inline __le32 __cpu_to_le32p(const __u32 *p)
+{
+ return (__force __le32)*p;
+}
+static inline __u32 __le32_to_cpup(const __le32 *p)
+{
+ return (__force __u32)*p;
+}
+static inline __le16 __cpu_to_le16p(const __u16 *p)
+{
+ return (__force __le16)*p;
+}
+static inline __u16 __le16_to_cpup(const __le16 *p)
+{
+ return (__force __u16)*p;
+}
+static inline __be64 __cpu_to_be64p(const __u64 *p)
+{
+ return (__force __be64)__swab64p(p);
+}
+static inline __u64 __be64_to_cpup(const __be64 *p)
+{
+ return __swab64p((__u64 *)p);
+}
+static inline __be32 __cpu_to_be32p(const __u32 *p)
+{
+ return (__force __be32)__swab32p(p);
+}
+static inline __u32 __be32_to_cpup(const __be32 *p)
+{
+ return __swab32p((__u32 *)p);
+}
+static inline __be16 __cpu_to_be16p(const __u16 *p)
+{
+ return (__force __be16)__swab16p(p);
+}
+static inline __u16 __be16_to_cpup(const __be16 *p)
+{
+ return __swab16p((__u16 *)p);
+}
+#define __cpu_to_le64s(x) do {} while (0)
+#define __le64_to_cpus(x) do {} while (0)
+#define __cpu_to_le32s(x) do {} while (0)
+#define __le32_to_cpus(x) do {} while (0)
+#define __cpu_to_le16s(x) do {} while (0)
+#define __le16_to_cpus(x) do {} while (0)
+#define __cpu_to_be64s(x) __swab64s((x))
+#define __be64_to_cpus(x) __swab64s((x))
+#define __cpu_to_be32s(x) __swab32s((x))
+#define __be32_to_cpus(x) __swab32s((x))
+#define __cpu_to_be16s(x) __swab16s((x))
+#define __be16_to_cpus(x) __swab16s((x))
+
+#include <lwk/byteorder/generic.h>
+
+#endif /* _LWK_BYTEORDER_LITTLE_ENDIAN_H */
--- /dev/null
+#ifndef _LWK_BYTEORDER_SWAB_H
+#define _LWK_BYTEORDER_SWAB_H
+
+/*
+ * lwk/byteorder/swab.h
+ * Byte-swapping, independently from CPU endianness
+ * swabXX[ps]?(foo)
+ *
+ * Francois-Rene Rideau <fare@tunes.org> 19971205
+ * separated swab functions from cpu_to_XX,
+ * to clean up support for bizarre-endian architectures.
+ *
+ * See asm-i386/byteorder.h and friends for examples of how to provide
+ * architecture-dependent optimized versions
+ *
+ */
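+
+/*
+ * A minimal usage sketch (the swab32/swab32s nicknames used below are only
+ * available when __KERNEL__ is defined; see the end of this file):
+ *
+ *     __u32 x = 0x12345678;
+ *     __u32 y = swab32(x);        // y == 0x78563412 on any CPU
+ *     swab32s(&x);                // swap x in place
+ *
+ * Unlike cpu_to_beXX()/cpu_to_leXX(), these always swap, independent of
+ * the CPU's endianness.
+ */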
+
+#include <lwk/compiler.h>
+
+/* Casts are necessary for constants, because we never know for sure
+ * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
+ */
+#define ___swab16(x) \
+({ \
+ __u16 __x = (x); \
+ ((__u16)( \
+ (((__u16)(__x) & (__u16)0x00ffU) << 8) | \
+ (((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
+})
+
+#define ___swab32(x) \
+({ \
+ __u32 __x = (x); \
+ ((__u32)( \
+ (((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
+ (((__u32)(__x) & (__u32)0x0000ff00UL) << 8) | \
+ (((__u32)(__x) & (__u32)0x00ff0000UL) >> 8) | \
+ (((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
+})
+
+#define ___swab64(x) \
+({ \
+ __u64 __x = (x); \
+ ((__u64)( \
+ (__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
+ (__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
+ (__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
+ (__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) << 8) | \
+ (__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >> 8) | \
+ (__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
+ (__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
+ (__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
+})
+
+#define ___constant_swab16(x) \
+ ((__u16)( \
+ (((__u16)(x) & (__u16)0x00ffU) << 8) | \
+ (((__u16)(x) & (__u16)0xff00U) >> 8) ))
+#define ___constant_swab32(x) \
+ ((__u32)( \
+ (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
+ (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
+ (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
+ (((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
+#define ___constant_swab64(x) \
+ ((__u64)( \
+ (__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
+ (__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
+ (__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
+ (__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
+ (__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
+ (__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
+ (__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
+ (__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
+
+/*
+ * provide defaults when no architecture-specific optimization is detected
+ */
+#ifndef __arch__swab16
+# define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
+#endif
+#ifndef __arch__swab32
+# define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
+#endif
+#ifndef __arch__swab64
+# define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
+#endif
+
+#ifndef __arch__swab16p
+# define __arch__swab16p(x) __arch__swab16(*(x))
+#endif
+#ifndef __arch__swab32p
+# define __arch__swab32p(x) __arch__swab32(*(x))
+#endif
+#ifndef __arch__swab64p
+# define __arch__swab64p(x) __arch__swab64(*(x))
+#endif
+
+#ifndef __arch__swab16s
+# define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
+#endif
+#ifndef __arch__swab32s
+# define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
+#endif
+#ifndef __arch__swab64s
+# define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
+#endif
+
+
+/*
+ * Allow constant folding
+ */
+#if defined(__GNUC__) && defined(__OPTIMIZE__)
+# define __swab16(x) \
+(__builtin_constant_p((__u16)(x)) ? \
+ ___swab16((x)) : \
+ __fswab16((x)))
+# define __swab32(x) \
+(__builtin_constant_p((__u32)(x)) ? \
+ ___swab32((x)) : \
+ __fswab32((x)))
+# define __swab64(x) \
+(__builtin_constant_p((__u64)(x)) ? \
+ ___swab64((x)) : \
+ __fswab64((x)))
+#else
+# define __swab16(x) __fswab16(x)
+# define __swab32(x) __fswab32(x)
+# define __swab64(x) __fswab64(x)
+#endif /* OPTIMIZE */
+
+
+static __inline__ __attribute_const__ __u16 __fswab16(__u16 x)
+{
+ return __arch__swab16(x);
+}
+static __inline__ __u16 __swab16p(const __u16 *x)
+{
+ return __arch__swab16p(x);
+}
+static __inline__ void __swab16s(__u16 *addr)
+{
+ __arch__swab16s(addr);
+}
+
+static __inline__ __attribute_const__ __u32 __fswab32(__u32 x)
+{
+ return __arch__swab32(x);
+}
+static __inline__ __u32 __swab32p(const __u32 *x)
+{
+ return __arch__swab32p(x);
+}
+static __inline__ void __swab32s(__u32 *addr)
+{
+ __arch__swab32s(addr);
+}
+
+#ifdef __BYTEORDER_HAS_U64__
+static __inline__ __attribute_const__ __u64 __fswab64(__u64 x)
+{
+# ifdef __SWAB_64_THRU_32__
+ __u32 h = x >> 32;
+ __u32 l = x & ((1ULL<<32)-1);
+ return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
+# else
+ return __arch__swab64(x);
+# endif
+}
+static __inline__ __u64 __swab64p(const __u64 *x)
+{
+ return __arch__swab64p(x);
+}
+static __inline__ void __swab64s(__u64 *addr)
+{
+ __arch__swab64s(addr);
+}
+#endif /* __BYTEORDER_HAS_U64__ */
+
+#if defined(__KERNEL__)
+#define swab16 __swab16
+#define swab32 __swab32
+#define swab64 __swab64
+#define swab16p __swab16p
+#define swab32p __swab32p
+#define swab64p __swab64p
+#define swab16s __swab16s
+#define swab32s __swab32s
+#define swab64s __swab64s
+#endif
+
+#endif /* _LWK_BYTEORDER_SWAB_H */
--- /dev/null
+#ifndef _LWK_CACHE_H
+#define _LWK_CACHE_H
+
+#include <lwk/kernel.h>
+#include <arch/cache.h>
+
+#ifndef L1_CACHE_ALIGN
+#define L1_CACHE_ALIGN(x) ALIGN(x, L1_CACHE_BYTES)
+#endif
+
+#ifndef SMP_CACHE_BYTES
+#define SMP_CACHE_BYTES L1_CACHE_BYTES
+#endif
+
+#ifndef __read_mostly
+#define __read_mostly
+#endif
+
+#ifndef ____cacheline_aligned
+#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
+#endif
+
+#ifndef ____cacheline_aligned_in_smp
+#define ____cacheline_aligned_in_smp ____cacheline_aligned
+#endif
+
+#ifndef __cacheline_aligned
+#define __cacheline_aligned \
+ __attribute__((__aligned__(SMP_CACHE_BYTES), \
+ __section__(".data.cacheline_aligned")))
+#endif
+
+#ifndef __cacheline_aligned_in_smp
+#define __cacheline_aligned_in_smp __cacheline_aligned
+#endif
+
+/*
+ * The maximum alignment needed for some critical structures
+ * These could be inter-node cacheline sizes/L3 cacheline
+ * size etc. Define this in asm/cache.h for your arch
+ */
+#ifndef INTERNODE_CACHE_SHIFT
+#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
+#endif
+
+#if !defined(____cacheline_internodealigned_in_smp)
+#define ____cacheline_internodealigned_in_smp \
+ __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
+#endif
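+
+/*
+ * A minimal usage sketch (struct cpu_stats is hypothetical): pad a
+ * frequently written per-CPU structure out to a full cache line so that
+ * two CPUs never share (and ping-pong) the same line:
+ *
+ *     struct cpu_stats {
+ *             unsigned long interrupts;
+ *             unsigned long syscalls;
+ *     } ____cacheline_aligned;
+ *
+ *     static struct cpu_stats stats[NR_CPUS];
+ *
+ * Use __cacheline_aligned (two leading underscores) when the data should
+ * also be placed in the .data.cacheline_aligned section.
+ */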
+
+#endif /* _LWK_CACHE_H */
--- /dev/null
+/* Never include this file directly. Include <lwk/compiler.h> instead. */
+
+/*
+ * Common definitions for all gcc versions go here.
+ */
+
+
+/* Optimization barrier */
+/* The "volatile" is due to gcc bugs */
+#define barrier() __asm__ __volatile__("": : :"memory")
+
+/* This macro obfuscates arithmetic on a variable address so that gcc
+ shouldn't recognize the original var, and make assumptions about it */
+/*
+ * Versions of the ppc64 compiler before 4.1 had a bug where use of
+ * RELOC_HIDE could trash r30. The bug can be worked around by changing
+ * the inline assembly constraint from =g to =r, in this particular
+ * case either is valid.
+ */
+#define RELOC_HIDE(ptr, off) \
+ ({ unsigned long __ptr; \
+ __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
+ (typeof(ptr)) (__ptr + (off)); })
+
+
+#define inline inline __attribute__((always_inline))
+#define __inline__ __inline__ __attribute__((always_inline))
+#define __inline __inline __attribute__((always_inline))
+#define __deprecated __attribute__((deprecated))
+#define noinline __attribute__((noinline))
+#define __attribute_pure__ __attribute__((pure))
+#define __attribute_const__ __attribute__((__const__))
--- /dev/null
+/* Never include this file directly. Include <lwk/compiler.h> instead. */
+
+/* These definitions are for GCC v4.x. */
+#include <lwk/compiler-gcc.h>
+
+#ifdef CONFIG_FORCED_INLINING
+# undef inline
+# undef __inline__
+# undef __inline
+# define inline inline __attribute__((always_inline))
+# define __inline__ __inline__ __attribute__((always_inline))
+# define __inline __inline __attribute__((always_inline))
+#endif
+
+#define __attribute_used__ __attribute__((__used__))
+#define __must_check __attribute__((warn_unused_result))
+#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
+#define __always_inline inline __attribute__((always_inline))
--- /dev/null
+#ifndef _LWK_COMPILER_H
+#define _LWK_COMPILER_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef __CHECKER__
+# define __user __attribute__((noderef, address_space(1)))
+# define __kernel /* default address space */
+# define __safe __attribute__((safe))
+# define __force __attribute__((force))
+# define __nocast __attribute__((nocast))
+# define __iomem __attribute__((noderef, address_space(2)))
+# define __acquires(x) __attribute__((context(0,1)))
+# define __releases(x) __attribute__((context(1,0)))
+# define __acquire(x) __context__(1)
+# define __release(x) __context__(-1)
+# define __cond_lock(x) ((x) ? ({ __context__(1); 1; }) : 0)
+extern void __chk_user_ptr(void __user *);
+extern void __chk_io_ptr(void __iomem *);
+#else
+# define __user
+# define __kernel
+# define __safe
+# define __force
+# define __nocast
+# define __iomem
+# define __chk_user_ptr(x) (void)0
+# define __chk_io_ptr(x) (void)0
+# define __builtin_warning(x, y...) (1)
+# define __acquires(x)
+# define __releases(x)
+# define __acquire(x) (void)0
+# define __release(x) (void)0
+# define __cond_lock(x) (x)
+#endif
+
+#ifdef __KERNEL__
+
+#if __GNUC__ > 4
+#error no compiler-gcc.h file for this gcc version
+#elif __GNUC__ == 4
+# include <lwk/compiler-gcc4.h>
+#else
+# error Sorry, your compiler is too old/not recognized.
+#endif
+
+/*
+ * Generic compiler-dependent macros required for kernel
+ * build go below this comment. Actual compiler/compiler version
+ * specific implementations come from the above header files
+ */
+
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+
+/* Optimization barrier */
+#ifndef barrier
+# define barrier() __memory_barrier()
+#endif
+
+#ifndef RELOC_HIDE
+# define RELOC_HIDE(ptr, off) \
+ ({ unsigned long __ptr; \
+ __ptr = (unsigned long) (ptr); \
+ (typeof(ptr)) (__ptr + (off)); })
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+/*
+ * Allow us to mark functions as 'deprecated' and have gcc emit a nice
+ * warning for each use, in hopes of speeding the function's removal.
+ * Usage is:
+ * int __deprecated foo(void)
+ */
+#ifndef __deprecated
+# define __deprecated /* unimplemented */
+#endif
+
+#ifndef __must_check
+#define __must_check
+#endif
+
+/*
+ * Allow us to avoid 'defined but not used' warnings on functions and data,
+ * as well as force them to be emitted to the assembly file.
+ *
+ * As of gcc 3.3, static functions that are not marked with attribute((used))
+ * may be elided from the assembly file. As of gcc 3.3, static data not so
+ * marked will not be elided, but this may change in a future gcc version.
+ *
+ * In prior versions of gcc, such functions and data would be emitted, but
+ * would be warned about except with attribute((unused)).
+ */
+#ifndef __attribute_used__
+# define __attribute_used__ /* unimplemented */
+#endif
+
+/*
+ * From the GCC manual:
+ *
+ * Many functions have no effects except the return value and their
+ * return value depends only on the parameters and/or global
+ * variables. Such a function can be subject to common subexpression
+ * elimination and loop optimization just as an arithmetic operator
+ * would be.
+ * [...]
+ */
+#ifndef __attribute_pure__
+# define __attribute_pure__ /* unimplemented */
+#endif
+
+#ifndef noinline
+#define noinline
+#endif
+
+#ifndef __always_inline
+#define __always_inline inline
+#endif
+
+#endif /* __KERNEL__ */
+
+/*
+ * From the GCC manual:
+ *
+ * Many functions do not examine any values except their arguments,
+ * and have no effects except the return value. Basically this is
+ * just a slightly stricter class than the `pure' attribute above,
+ * since the function is not allowed to read global memory.
+ *
+ * Note that a function that has pointer arguments and examines the
+ * data pointed to must _not_ be declared `const'. Likewise, a
+ * function that calls a non-`const' function usually must not be
+ * `const'. It does not make sense for a `const' function to return
+ * `void'.
+ */
+#ifndef __attribute_const__
+# define __attribute_const__ /* unimplemented */
+#endif
+
+#endif /* _LWK_COMPILER_H */
--- /dev/null
+#ifndef _LWK_CONSOLE_H
+#define _LWK_CONSOLE_H
+
+#include <lwk/list.h>
+#include <lwk/compiler.h>
+
+/** Console structure.
+ *
+ * Each console in the system is represented by one of these
+ * structures. A console driver (e.g., VGA, Serial) fills in
+ * one of these structures and passes it to ::console_register().
+ */
+struct console {
+ char name[64];
+ void (*write)(struct console *, const char *);
+ void * private_data;
+
+ struct list_head next;
+};
+
+extern void console_register(struct console *);
+extern void console_write(const char *);
+extern void console_init(void);
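+
+/*
+ * A minimal registration sketch (my_serial_write and my_serial_console are
+ * hypothetical names used only for illustration):
+ *
+ *     static void my_serial_write(struct console *con, const char *str)
+ *     {
+ *             // push each character of str out to the device
+ *     }
+ *
+ *     static struct console my_serial_console = {
+ *             .name  = "my_serial",
+ *             .write = my_serial_write,
+ *     };
+ *
+ *     // typically called from the driver's init function
+ *     console_register(&my_serial_console);
+ *
+ * Once registered, output passed to console_write() can reach the console
+ * through its write() hook.
+ */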
+
+#endif
--- /dev/null
+/* const.h: Macros for dealing with constants. */
+
+#ifndef _LWK_CONST_H
+#define _LWK_CONST_H
+
+/* Some constant macros are used in both assembler and
+ * C code. Therefore we cannot annotate them always with
+ * 'UL' and other type specifiers unilaterally. We
+ * use the following macros to deal with this.
+ */
+
+#ifdef __ASSEMBLY__
+#define _AC(X,Y) X
+#else
+#define __AC(X,Y) (X##Y)
+#define _AC(X,Y) __AC(X,Y)
+#endif
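+
+/*
+ * A minimal usage sketch (PAGE_SHIFT/PAGE_SIZE here are hypothetical,
+ * shown only to illustrate the macro):
+ *
+ *     #define PAGE_SHIFT      12
+ *     #define PAGE_SIZE       (_AC(1, UL) << PAGE_SHIFT)
+ *
+ * In C, PAGE_SIZE expands to ((1UL) << 12); in assembly, where the UL
+ * suffix would be a syntax error, it expands to (1 << 12).
+ */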
+
+#endif /* !(_LWK_CONST_H) */
--- /dev/null
+#ifndef _LWK_CPU_H
+#define _LWK_CPU_H
+
+/** Maximum number of CPUs supported. */
+#define NR_CPUS CONFIG_NR_CPUS
+
+#endif
--- /dev/null
+#ifndef _LWK_CPUINFO_H
+#define _LWK_CPUINFO_H
+
+#include <lwk/types.h>
+#include <lwk/cpumask.h>
+#include <lwk/cache.h>
+#include <lwk/aspace.h>
+#include <arch/cpuinfo.h>
+
+/**
+ * CPU information.
+ * Each CPU in the system is described by one of these structures.
+ */
+struct cpuinfo {
+ /* Identification */
+ uint16_t logical_id; // CPU's kernel assigned ID
+ uint16_t physical_id; // CPU's hardware ID
+
+ /* Topology information */
+ uint16_t numa_node_id; // NUMA node ID this CPU is in
+ cpumask_t numa_node_map; // CPUs in this CPU's NUMA node
+ cpumask_t llc_share_map; // CPUs sharing last level cache
+
+ /* Physical packaging */
+ uint16_t phys_socket_id; // Physical socket/package
+ uint16_t phys_core_id; // Core ID in the socket/package
+ uint16_t phys_hwthread_id; // Hardware thread ID in core
+
+ /* Memory management info */
+ vmpagesize_t pagesz_mask; // Page sizes supported by the CPU
+
+ /* Architecture specific */
+ struct arch_cpuinfo arch;
+} ____cacheline_aligned;
+
+extern struct cpuinfo cpu_info[NR_CPUS];
+extern cpumask_t cpu_present_map;
+extern cpumask_t cpu_online_map;
+
+/**
+ * Returns the number of CPUs in the system.
+ */
+#define num_cpus() cpus_weight(cpu_present_map)
+
+extern void print_cpuinfo(struct cpuinfo *);
+
+#endif
--- /dev/null
+#ifndef _LWK_CPUMASK_H
+#define _LWK_CPUMASK_H
+
+/*
+ * Cpumasks provide a bitmap suitable for representing the
+ * set of CPU's in a system, one bit position per CPU number.
+ *
+ * See detailed comments in the file linux/bitmap.h describing the
+ * data type on which these cpumasks are based.
+ *
+ * For details of cpumask_scnprintf() and cpumask_parse(),
+ * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
+ * For details of cpulist_scnprintf() and cpulist_parse(), see
+ * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
+ * For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c
+ * For details of cpus_remap(), see bitmap_remap in lib/bitmap.c.
+ *
+ * The available cpumask operations are:
+ *
+ * void cpu_set(cpu, mask) turn on bit 'cpu' in mask
+ * void cpu_clear(cpu, mask) turn off bit 'cpu' in mask
+ * void cpus_setall(mask) set all bits
+ * void cpus_clear(mask) clear all bits
+ * int cpu_isset(cpu, mask) true iff bit 'cpu' set in mask
+ * int cpu_test_and_set(cpu, mask) test and set bit 'cpu' in mask
+ *
+ * void cpus_and(dst, src1, src2) dst = src1 & src2 [intersection]
+ * void cpus_or(dst, src1, src2) dst = src1 | src2 [union]
+ * void cpus_xor(dst, src1, src2) dst = src1 ^ src2
+ * void cpus_andnot(dst, src1, src2) dst = src1 & ~src2
+ * void cpus_complement(dst, src) dst = ~src
+ *
+ * int cpus_equal(mask1, mask2) Does mask1 == mask2?
+ * int cpus_intersects(mask1, mask2) Do mask1 and mask2 intersect?
+ * int cpus_subset(mask1, mask2) Is mask1 a subset of mask2?
+ * int cpus_empty(mask) Is mask empty (no bits set)?
+ * int cpus_full(mask) Is mask full (all bits set)?
+ * int cpus_weight(mask) Hamming weight - number of set bits
+ *
+ * void cpus_shift_right(dst, src, n) Shift right
+ * void cpus_shift_left(dst, src, n) Shift left
+ *
+ * int first_cpu(mask) Number lowest set bit, or NR_CPUS
+ * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS
+ *
+ * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set
+ * CPU_MASK_ALL Initializer - all bits set
+ * CPU_MASK_NONE Initializer - no bits set
+ * unsigned long *cpus_addr(mask) Array of unsigned long's in mask
+ *
+ * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
+ * int cpumask_parse(ubuf, ulen, mask) Parse ascii string as cpumask
+ * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
+ * int cpulist_parse(buf, map) Parse ascii string as cpulist
+ * int cpu_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
+ * int cpus_remap(dst, src, old, new) *dst = map(old, new)(src)
+ *
+ * for_each_cpu_mask(cpu, mask) for-loop cpu over mask
+ *
+ * int num_online_cpus() Number of online CPUs
+ * int num_possible_cpus() Number of all possible CPUs
+ * int num_present_cpus() Number of present CPUs
+ *
+ * int cpu_online(cpu) Is some cpu online?
+ * int cpu_possible(cpu) Is some cpu possible?
+ * int cpu_present(cpu) Is some cpu present (can schedule)?
+ *
+ * int any_online_cpu(mask) First online cpu in mask
+ *
+ * for_each_possible_cpu(cpu) for-loop cpu over cpu_possible_map
+ * for_each_online_cpu(cpu) for-loop cpu over cpu_online_map
+ * for_each_present_cpu(cpu) for-loop cpu over cpu_present_map
+ *
+ * Subtlety:
+ * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
+ * to generate slightly worse code. Note for example the additional
+ * 40 lines of assembly code compiling the "for each possible cpu"
+ * loops buried in the disk_stat_read() macros calls when compiling
+ * drivers/block/genhd.c (arch i386, CONFIG_SMP=y). So use a simple
+ * one-line #define for cpu_isset(), instead of wrapping an inline
+ * inside a macro, the way we do the other calls.
+ */
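+
+/*
+ * A minimal usage sketch of the operations above (printk() is assumed to
+ * be available, as elsewhere in the kernel):
+ *
+ *     cpumask_t mask = CPU_MASK_NONE;
+ *     int cpu;
+ *
+ *     cpu_set(2, mask);                       // mask = { 2 }
+ *     cpu_set(5, mask);                       // mask = { 2, 5 }
+ *     cpus_and(mask, mask, cpu_online_map);   // drop any offline CPUs
+ *
+ *     for_each_cpu_mask(cpu, mask)
+ *             printk("CPU %d selected\n", cpu);
+ */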
+
+/**
+ * Fixed size cpumask structure for user-space.
+ * As long as CPU_MAX_ID >= NR_CPUS, we're good to go...
+ * otherwise we need to bump up CPU_MAX_ID and therefore break
+ * user-level binary compatibility, causing a flag day.
+ */
+
+#define CPU_MIN_ID 0
+#define CPU_MAX_ID 2047
+typedef struct {
+ unsigned long bits[(CPU_MAX_ID+1)/(sizeof(unsigned long) * 8)];
+} user_cpumask_t;
+
+#ifdef __KERNEL__
+
+#include <lwk/kernel.h>
+#include <lwk/bitmap.h>
+#include <lwk/cpu.h>
+
+#if (CPU_MAX_ID + 1 < NR_CPUS)
+#error "NR_CPUS must be <= CPU_MAX_ID + 1"
+#endif
+
+typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
+extern cpumask_t _unused_cpumask_arg_;
+
+static inline void
+cpumask_kernel2user(const cpumask_t *kernel, user_cpumask_t *user)
+{
+ memset(user, 0, sizeof(user_cpumask_t));
+ memcpy(user, kernel, sizeof(*kernel));
+}
+
+static inline void
+cpumask_user2kernel(const user_cpumask_t *user, cpumask_t *kernel)
+{
+ memcpy(kernel, user, sizeof(*kernel));
+}
+
+#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
+static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
+{
+ set_bit(cpu, dstp->bits);
+}
+
+#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
+static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
+{
+ clear_bit(cpu, dstp->bits);
+}
+
+#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
+static inline void __cpus_setall(cpumask_t *dstp, int nbits)
+{
+ bitmap_fill(dstp->bits, nbits);
+}
+
+#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
+static inline void __cpus_clear(cpumask_t *dstp, int nbits)
+{
+ bitmap_zero(dstp->bits, nbits);
+}
+
+/* No static inline type checking - see Subtlety (1) above. */
+#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
+
+#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
+static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
+{
+ return test_and_set_bit(cpu, addr->bits);
+}
+
+#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_andnot(dst, src1, src2) \
+ __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
+static inline void __cpus_complement(cpumask_t *dstp,
+ const cpumask_t *srcp, int nbits)
+{
+ bitmap_complement(dstp->bits, srcp->bits, nbits);
+}
+
+#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
+static inline int __cpus_equal(const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_equal(src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
+static inline int __cpus_intersects(const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_intersects(src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
+static inline int __cpus_subset(const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_subset(src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
+static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
+{
+ return bitmap_empty(srcp->bits, nbits);
+}
+
+#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
+static inline int __cpus_full(const cpumask_t *srcp, int nbits)
+{
+ return bitmap_full(srcp->bits, nbits);
+}
+
+#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
+static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
+{
+ return bitmap_weight(srcp->bits, nbits);
+}
+
+#define cpus_shift_right(dst, src, n) \
+ __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
+static inline void __cpus_shift_right(cpumask_t *dstp,
+ const cpumask_t *srcp, int n, int nbits)
+{
+ bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
+}
+
+#define cpus_shift_left(dst, src, n) \
+ __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
+static inline void __cpus_shift_left(cpumask_t *dstp,
+ const cpumask_t *srcp, int n, int nbits)
+{
+ bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
+}
+
+int __first_cpu(const cpumask_t *srcp);
+#define first_cpu(src) __first_cpu(&(src))
+int __next_cpu(int n, const cpumask_t *srcp);
+#define next_cpu(n, src) __next_cpu((n), &(src))
+
+#define cpumask_of_cpu(cpu) \
+({ \
+ typeof(_unused_cpumask_arg_) m; \
+ if (sizeof(m) == sizeof(unsigned long)) { \
+ m.bits[0] = 1UL<<(cpu); \
+ } else { \
+ cpus_clear(m); \
+ cpu_set((cpu), m); \
+ } \
+ m; \
+})
+
+#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
+
+#if NR_CPUS <= BITS_PER_LONG
+
+#define CPU_MASK_ALL \
+(cpumask_t) { { \
+ [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+} }
+
+#else
+
+#define CPU_MASK_ALL \
+(cpumask_t) { { \
+ [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
+ [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+} }
+
+#endif
+
+#define CPU_MASK_NONE \
+(cpumask_t) { { \
+ [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
+} }
+
+#define CPU_MASK_CPU0 \
+(cpumask_t) { { \
+ [0] = 1UL \
+} }
+
+#define cpus_addr(src) ((src).bits)
+
+#define cpumask_scnprintf(buf, len, src) \
+ __cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
+static inline int __cpumask_scnprintf(char *buf, int len,
+ const cpumask_t *srcp, int nbits)
+{
+ return bitmap_scnprintf(buf, len, srcp->bits, nbits);
+}
+
+#define cpumask_parse(ubuf, ulen, dst) \
+ __cpumask_parse((ubuf), (ulen), &(dst), NR_CPUS)
+static inline int __cpumask_parse(const char __user *buf, int len,
+ cpumask_t *dstp, int nbits)
+{
+ return bitmap_parse(buf, len, dstp->bits, nbits);
+}
+
+#define cpulist_scnprintf(buf, len, src) \
+ __cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
+static inline int __cpulist_scnprintf(char *buf, int len,
+ const cpumask_t *srcp, int nbits)
+{
+ return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
+}
+
+#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS)
+static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits)
+{
+ return bitmap_parselist(buf, dstp->bits, nbits);
+}
+
+#define cpu_remap(oldbit, old, new) \
+ __cpu_remap((oldbit), &(old), &(new), NR_CPUS)
+static inline int __cpu_remap(int oldbit,
+ const cpumask_t *oldp, const cpumask_t *newp, int nbits)
+{
+ return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
+}
+
+#define cpus_remap(dst, src, old, new) \
+ __cpus_remap(&(dst), &(src), &(old), &(new), NR_CPUS)
+static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
+ const cpumask_t *oldp, const cpumask_t *newp, int nbits)
+{
+ bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
+}
+
+#define for_each_cpu_mask(cpu, mask) \
+ for ((cpu) = first_cpu(mask); \
+ (cpu) < NR_CPUS; \
+ (cpu) = next_cpu((cpu), (mask)))
+
+/*
+ * The following particular system cpumasks and operations manage
+ * possible, present and online cpus. Each of them is a fixed size
+ * bitmap of size NR_CPUS.
+ *
+ * #ifdef CONFIG_HOTPLUG_CPU
+ * cpu_possible_map - has bit 'cpu' set iff cpu is populatable
+ * cpu_present_map - has bit 'cpu' set iff cpu is populated
+ * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler
+ * #else
+ * cpu_possible_map - has bit 'cpu' set iff cpu is populated
+ * cpu_present_map - copy of cpu_possible_map
+ * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler
+ * #endif
+ *
+ * In either case, NR_CPUS is fixed at compile time, as the static
+ * size of these bitmaps. The cpu_possible_map is fixed at boot
+ * time, as the set of CPU IDs that might ever be plugged in
+ * at any time during the life of that system boot.
+ * The cpu_present_map is dynamic(*), representing which CPUs
+ * are currently plugged in. And cpu_online_map is the dynamic
+ * subset of cpu_present_map, indicating those CPUs available
+ * for scheduling.
+ *
+ * If HOTPLUG is enabled, then cpu_possible_map is forced to have
+ * all NR_CPUS bits set, otherwise it is just the set of CPUs that
+ * ACPI reports present at boot.
+ *
+ * If HOTPLUG is enabled, then cpu_present_map varies dynamically,
+ * depending on what ACPI reports as currently plugged in, otherwise
+ * cpu_present_map is just a copy of cpu_possible_map.
+ *
+ * (*) Well, cpu_present_map is dynamic in the hotplug case. If not
+ * hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
+ *
+ * Subtleties:
+ * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
+ * assumption that their single CPU is online. The UP
+ * cpu_{online,possible,present}_maps are placebos. Changing them
+ * will have no useful effect on the following num_*_cpus()
+ * and cpu_*() macros in the UP case. This ugliness is a UP
+ * optimization - don't waste any instructions or memory references
+ * asking if you're online or how many CPUs there are if there is
+ * only one CPU.
+ * 2) Most SMP arch's #define some of these maps to be some
+ * other map specific to that arch. Therefore, the following
+ * must be #define macros, not inlines. To see why, examine
+ * the assembly code produced by the following. Note that
+ * set1() writes phys_x_map, but set2() writes x_map:
+ * int x_map, phys_x_map;
+ * #define set1(a) x_map = a
+ * inline void set2(int a) { x_map = a; }
+ * #define x_map phys_x_map
+ * main(){ set1(3); set2(5); }
+ */
+
+extern cpumask_t cpu_possible_map;
+extern cpumask_t cpu_online_map;
+extern cpumask_t cpu_present_map;
+
+#define num_online_cpus() cpus_weight(cpu_online_map)
+#define num_possible_cpus() cpus_weight(cpu_possible_map)
+#define num_present_cpus() cpus_weight(cpu_present_map)
+#define cpu_online(cpu) cpu_isset((cpu), cpu_online_map)
+#define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map)
+#define cpu_present(cpu) cpu_isset((cpu), cpu_present_map)
+
+int highest_possible_processor_id(void);
+#define any_online_cpu(mask) __any_online_cpu(&(mask))
+int __any_online_cpu(const cpumask_t *mask);
+
+#define for_each_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
+#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
+#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map)
+#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
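+
+/*
+ * A minimal usage sketch of the system maps and iterators above
+ * (target_cpu and do_per_cpu_setup() are hypothetical):
+ *
+ *     int cpu;
+ *
+ *     for_each_online_cpu(cpu)
+ *             do_per_cpu_setup(cpu);
+ *
+ *     if (!cpu_online(target_cpu))
+ *             return -EINVAL;
+ *
+ *     if (num_online_cpus() == 1)
+ *             return 0;       // nothing more to do on a single-CPU system
+ */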
+
+#endif
+#endif /* _LWK_CPUMASK_H */
--- /dev/null
+#ifndef _LWK_CTYPE_H
+#define _LWK_CTYPE_H
+
+/*
+ * NOTE! This ctype does not handle EOF like the standard C
+ * library is required to.
+ */
+
+#define _U 0x01 /* upper */
+#define _L 0x02 /* lower */
+#define _D 0x04 /* digit */
+#define _C 0x08 /* cntrl */
+#define _P 0x10 /* punct */
+#define _S 0x20 /* white space (space/lf/tab) */
+#define _X 0x40 /* hex digit */
+#define _SP 0x80 /* hard space (0x20) */
+
+extern unsigned char _ctype[];
+
+#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
+
+#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0)
+#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0)
+#define iscntrl(c) ((__ismask(c)&(_C)) != 0)
+#define isdigit(c) ((__ismask(c)&(_D)) != 0)
+#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0)
+#define islower(c) ((__ismask(c)&(_L)) != 0)
+#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
+#define ispunct(c) ((__ismask(c)&(_P)) != 0)
+#define isspace(c) ((__ismask(c)&(_S)) != 0)
+#define isupper(c) ((__ismask(c)&(_U)) != 0)
+#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)
+
+#define isascii(c) (((unsigned char)(c))<=0x7f)
+#define toascii(c) (((unsigned char)(c))&0x7f)
+
+static inline unsigned char __tolower(unsigned char c)
+{
+ if (isupper(c))
+ c -= 'A'-'a';
+ return c;
+}
+
+static inline unsigned char __toupper(unsigned char c)
+{
+ if (islower(c))
+ c -= 'a'-'A';
+ return c;
+}
+
+#define tolower(c) __tolower(c)
+#define toupper(c) __toupper(c)
+
+#endif
--- /dev/null
+#ifndef _LWK_DELAY_H
+#define _LWK_DELAY_H
+
+/*
+ * Copyright (C) 1993 Linus Torvalds
+ *
+ * Delay routines, using a pre-computed "loops_per_jiffy" value.
+ */
+
+extern unsigned long loops_per_jiffy;
+
+#include <arch/delay.h>
+
+/*
+ * Using udelay() for intervals greater than a few milliseconds can
+ * risk overflow for high loops_per_jiffy (high bogomips) machines.
+ * mdelay() provides a wrapper to prevent this. For delays greater
+ * than MAX_UDELAY_MS milliseconds, the wrapper is used. Architecture
+ * specific values can be defined in asm-???/delay.h as an override.
+ * The 2nd mdelay() definition ensures GCC will optimize away the
+ * while loop for the common cases where n <= MAX_UDELAY_MS -- Paul G.
+ */
+
+#ifndef MAX_UDELAY_MS
+#define MAX_UDELAY_MS 5
+#endif
+
+#ifdef notdef
+#define mdelay(n) (\
+ {unsigned long __ms=(n); while (__ms--) udelay(1000);})
+#else
+#define mdelay(n) (\
+ (__builtin_constant_p(n) && (n)<=MAX_UDELAY_MS) ? udelay((n)*1000) : \
+ ({unsigned long __ms=(n); while (__ms--) udelay(1000);}))
+#endif
+
+#ifndef ndelay
+#define ndelay(x) udelay(((x)+999)/1000)
+#endif
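+
+/*
+ * A minimal usage sketch:
+ *
+ *     udelay(50);     // busy-wait roughly 50 microseconds
+ *     mdelay(2);      // constant and <= MAX_UDELAY_MS: folds to udelay(2000)
+ *     mdelay(100);    // larger constants fall back to the while loop
+ *     ssleep(1);      // sleeps via msleep() rather than busy-waiting
+ */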
+
+void calibrate_delay(void);
+void msleep(unsigned int msecs);
+unsigned long msleep_interruptible(unsigned int msecs);
+
+static inline void ssleep(unsigned int seconds)
+{
+ msleep(seconds * 1000);
+}
+
+#endif /* defined(_LWK_DELAY_H) */
--- /dev/null
+#ifndef _LWK_DRIVER_H
+#define _LWK_DRIVER_H
+
+#include <lwk/kernel.h>
+#include <lwk/types.h>
+
+/*
+ * Drivers may set DRIVER_NAME before including this header.
+ * Otherwise, the KBUILD name is used.
+ */
+#ifndef DRIVER_NAME
+#define DRIVER_NAME KBUILD_MODNAME
+#endif
+
+/*
+ * Driver parameter names have the form:
+ *
+ * driver_name.parameter_name
+ *
+ * Example:
+ * To set integer parameter foo in driver bar, add this
+ * to the kernel boot command line:
+ *
+ * bar.foo=1
+ */
+#define __DRIVER_PARAM_PREFIX DRIVER_NAME "."
+
+/*
+ * For driver parameters. Parameters can be configured via the
+ * kernel boot command line or some other platform-dependent
+ * runtime mechanism.
+ */
+#include <lwk/params.h>
+
+#define driver_param(name, type) \
+ __param_named(__DRIVER_PARAM_PREFIX, name, name, type)
+
+#define driver_param_named(name, value, type) \
+ __param_named(__DRIVER_PARAM_PREFIX, name, value, type)
+
+#define driver_param_string(name, string, len) \
+ __param_string(__DRIVER_PARAM_PREFIX, name, string, len)
+
+#define driver_param_array(name, type, nump) \
+ __param_array_named(__DRIVER_PARAM_PREFIX, name, name, type, nump)
+
+#define driver_param_array_named(name, array, type, nump) \
+ __param_array_named(__DRIVER_PARAM_PREFIX, name, array, type, nump)
+
+/*
+ * Every driver defines one of these structures via the
+ * driver_init() macro. The structure gets placed in the
+ * __driver_table ELF section and the kernel walks this table
+ * to find/initialize all drivers in the system.
+ */
+struct driver_info {
+ const char * name; // name of the driver
+ void (*init)(void); // driver initialization function
+ int init_called; // set when .init() has been called,
+ // used to prevent double inits.
+};
+
+/*
+ * This adds a function to the __driver_table ELF section.
+ */
+#define driver_init(init_func) \
+ static char __driver_name[] = DRIVER_NAME; \
+ static struct driver_info const __driver_info \
+ __attribute_used__ \
+ __attribute__ ((unused,__section__ ("__driver_table"), \
+ aligned(sizeof(void *)))) \
+ = { __driver_name, init_func };
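+
+/*
+ * A minimal driver sketch ("foo" and its symbols are hypothetical, and the
+ * driver is assumed to be built with DRIVER_NAME/KBUILD_MODNAME "foo"):
+ *
+ *     static int debug = 0;
+ *     driver_param(debug, int);       // settable as foo.debug=1 on the
+ *                                     // kernel boot command line
+ *
+ *     static void foo_init(void)
+ *     {
+ *             // probe and initialize the hardware here
+ *     }
+ *     driver_init(foo_init);
+ */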
+
+/*
+ * The __driver_table ELF section is surrounded by these symbols,
+ * which are defined in the platform's linker script.
+ */
+extern struct driver_info __start___driver_table[];
+extern struct driver_info __stop___driver_table[];
+
+/*
+ * This is a placeholder. Currently drivers are never unloaded once loaded.
+ */
+#define driver_exit(exit_func)
+
+extern int driver_init_by_name(const char *name);
+
+#endif
--- /dev/null
+#ifndef _LWK_ELF_EM_H
+#define _LWK_ELF_EM_H
+
+/* These constants define the various ELF target machines */
+#define EM_NONE 0
+#define EM_X86_64 62 /* AMD x86-64 */
+
+#endif /* _LWK_ELF_EM_H */
--- /dev/null
+#ifndef _LWK_ELF_H
+#define _LWK_ELF_H
+
+#include <lwk/types.h>
+#include <lwk/task.h>
+#include <lwk/elf-em.h>
+#include <arch/elf.h>
+
+struct file;
+
+#ifndef elf_read_implies_exec
+ /* Executables for which elf_read_implies_exec() returns TRUE will
+ have the READ_IMPLIES_EXEC personality flag set automatically.
+ Override in asm/elf.h as needed. */
+# define elf_read_implies_exec(ex, have_pt_gnu_stack) 0
+#endif
+
+/* 32-bit ELF base types. */
+typedef __u32 Elf32_Addr;
+typedef __u16 Elf32_Half;
+typedef __u32 Elf32_Off;
+typedef __s32 Elf32_Sword;
+typedef __u32 Elf32_Word;
+
+/* 64-bit ELF base types. */
+typedef __u64 Elf64_Addr;
+typedef __u16 Elf64_Half;
+typedef __s16 Elf64_SHalf;
+typedef __u64 Elf64_Off;
+typedef __s32 Elf64_Sword;
+typedef __u32 Elf64_Word;
+typedef __u64 Elf64_Xword;
+typedef __s64 Elf64_Sxword;
+
+/* These constants are for the segment types stored in the image headers */
+#define PT_NULL 0
+#define PT_LOAD 1
+#define PT_DYNAMIC 2
+#define PT_INTERP 3
+#define PT_NOTE 4
+#define PT_SHLIB 5
+#define PT_PHDR 6
+#define PT_TLS 7 /* Thread local storage segment */
+#define PT_LOOS 0x60000000 /* OS-specific */
+#define PT_HIOS 0x6fffffff /* OS-specific */
+#define PT_LOPROC 0x70000000
+#define PT_HIPROC 0x7fffffff
+#define PT_GNU_EH_FRAME 0x6474e550
+
+#define PT_GNU_STACK (PT_LOOS + 0x474e551)
+
+/* These constants define the different elf file types */
+#define ET_NONE 0
+#define ET_REL 1
+#define ET_EXEC 2
+#define ET_DYN 3
+#define ET_CORE 4
+#define ET_LOPROC 0xff00
+#define ET_HIPROC 0xffff
+
+/* This is the info that is needed to parse the dynamic section of the file */
+#define DT_NULL 0
+#define DT_NEEDED 1
+#define DT_PLTRELSZ 2
+#define DT_PLTGOT 3
+#define DT_HASH 4
+#define DT_STRTAB 5
+#define DT_SYMTAB 6
+#define DT_RELA 7
+#define DT_RELASZ 8
+#define DT_RELAENT 9
+#define DT_STRSZ 10
+#define DT_SYMENT 11
+#define DT_INIT 12
+#define DT_FINI 13
+#define DT_SONAME 14
+#define DT_RPATH 15
+#define DT_SYMBOLIC 16
+#define DT_REL 17
+#define DT_RELSZ 18
+#define DT_RELENT 19
+#define DT_PLTREL 20
+#define DT_DEBUG 21
+#define DT_TEXTREL 22
+#define DT_JMPREL 23
+#define DT_ENCODING 32
+#define OLD_DT_LOOS 0x60000000
+#define DT_LOOS 0x6000000d
+#define DT_HIOS 0x6ffff000
+#define DT_VALRNGLO 0x6ffffd00
+#define DT_VALRNGHI 0x6ffffdff
+#define DT_ADDRRNGLO 0x6ffffe00
+#define DT_ADDRRNGHI 0x6ffffeff
+#define DT_VERSYM 0x6ffffff0
+#define DT_RELACOUNT 0x6ffffff9
+#define DT_RELCOUNT 0x6ffffffa
+#define DT_FLAGS_1 0x6ffffffb
+#define DT_VERDEF 0x6ffffffc
+#define DT_VERDEFNUM 0x6ffffffd
+#define DT_VERNEED 0x6ffffffe
+#define DT_VERNEEDNUM 0x6fffffff
+#define OLD_DT_HIOS 0x6fffffff
+#define DT_LOPROC 0x70000000
+#define DT_HIPROC 0x7fffffff
+
+/* This info is needed when parsing the symbol table */
+#define STB_LOCAL 0
+#define STB_GLOBAL 1
+#define STB_WEAK 2
+
+#define STT_NOTYPE 0
+#define STT_OBJECT 1
+#define STT_FUNC 2
+#define STT_SECTION 3
+#define STT_FILE 4
+#define STT_COMMON 5
+#define STT_TLS 6
+
+#define ELF_ST_BIND(x) ((x) >> 4)
+#define ELF_ST_TYPE(x) (((unsigned int) x) & 0xf)
+#define ELF32_ST_BIND(x) ELF_ST_BIND(x)
+#define ELF32_ST_TYPE(x) ELF_ST_TYPE(x)
+#define ELF64_ST_BIND(x) ELF_ST_BIND(x)
+#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x)
+
+typedef struct dynamic{
+ Elf32_Sword d_tag;
+ union{
+ Elf32_Sword d_val;
+ Elf32_Addr d_ptr;
+ } d_un;
+} Elf32_Dyn;
+
+typedef struct {
+ Elf64_Sxword d_tag; /* entry tag value */
+ union {
+ Elf64_Xword d_val;
+ Elf64_Addr d_ptr;
+ } d_un;
+} Elf64_Dyn;
+
+/* The following are used with relocations */
+#define ELF32_R_SYM(x) ((x) >> 8)
+#define ELF32_R_TYPE(x) ((x) & 0xff)
+
+#define ELF64_R_SYM(i) ((i) >> 32)
+#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
+
+typedef struct elf32_rel {
+ Elf32_Addr r_offset;
+ Elf32_Word r_info;
+} Elf32_Rel;
+
+typedef struct elf64_rel {
+ Elf64_Addr r_offset; /* Location at which to apply the action */
+ Elf64_Xword r_info; /* index and type of relocation */
+} Elf64_Rel;
+
+typedef struct elf32_rela{
+ Elf32_Addr r_offset;
+ Elf32_Word r_info;
+ Elf32_Sword r_addend;
+} Elf32_Rela;
+
+typedef struct elf64_rela {
+ Elf64_Addr r_offset; /* Location at which to apply the action */
+ Elf64_Xword r_info; /* index and type of relocation */
+ Elf64_Sxword r_addend; /* Constant addend used to compute value */
+} Elf64_Rela;
+
+typedef struct elf32_sym{
+ Elf32_Word st_name;
+ Elf32_Addr st_value;
+ Elf32_Word st_size;
+ unsigned char st_info;
+ unsigned char st_other;
+ Elf32_Half st_shndx;
+} Elf32_Sym;
+
+typedef struct elf64_sym {
+ Elf64_Word st_name; /* Symbol name, index in string tbl */
+ unsigned char st_info; /* Type and binding attributes */
+ unsigned char st_other; /* No defined meaning, 0 */
+ Elf64_Half st_shndx; /* Associated section index */
+ Elf64_Addr st_value; /* Value of the symbol */
+ Elf64_Xword st_size; /* Associated symbol size */
+} Elf64_Sym;
+
+
+#define EI_NIDENT 16
+
+typedef struct elf32_hdr{
+ unsigned char e_ident[EI_NIDENT];
+ Elf32_Half e_type;
+ Elf32_Half e_machine;
+ Elf32_Word e_version;
+ Elf32_Addr e_entry; /* Entry point */
+ Elf32_Off e_phoff;
+ Elf32_Off e_shoff;
+ Elf32_Word e_flags;
+ Elf32_Half e_ehsize;
+ Elf32_Half e_phentsize;
+ Elf32_Half e_phnum;
+ Elf32_Half e_shentsize;
+ Elf32_Half e_shnum;
+ Elf32_Half e_shstrndx;
+} Elf32_Ehdr;
+
+typedef struct elf64_hdr {
+ unsigned char e_ident[16]; /* ELF "magic number" */
+ Elf64_Half e_type;
+ Elf64_Half e_machine;
+ Elf64_Word e_version;
+ Elf64_Addr e_entry; /* Entry point virtual address */
+ Elf64_Off e_phoff; /* Program header table file offset */
+ Elf64_Off e_shoff; /* Section header table file offset */
+ Elf64_Word e_flags;
+ Elf64_Half e_ehsize;
+ Elf64_Half e_phentsize;
+ Elf64_Half e_phnum;
+ Elf64_Half e_shentsize;
+ Elf64_Half e_shnum;
+ Elf64_Half e_shstrndx;
+} Elf64_Ehdr;
+
+/* These constants define the permissions on sections in the program
+ header, p_flags. */
+#define PF_R 0x4
+#define PF_W 0x2
+#define PF_X 0x1
+
+typedef struct elf32_phdr{
+ Elf32_Word p_type;
+ Elf32_Off p_offset;
+ Elf32_Addr p_vaddr;
+ Elf32_Addr p_paddr;
+ Elf32_Word p_filesz;
+ Elf32_Word p_memsz;
+ Elf32_Word p_flags;
+ Elf32_Word p_align;
+} Elf32_Phdr;
+
+typedef struct elf64_phdr {
+ Elf64_Word p_type;
+ Elf64_Word p_flags;
+ Elf64_Off p_offset; /* Segment file offset */
+ Elf64_Addr p_vaddr; /* Segment virtual address */
+ Elf64_Addr p_paddr; /* Segment physical address */
+ Elf64_Xword p_filesz; /* Segment size in file */
+ Elf64_Xword p_memsz; /* Segment size in memory */
+ Elf64_Xword p_align; /* Segment alignment, file & memory */
+} Elf64_Phdr;
+
+/* sh_type */
+#define SHT_NULL 0
+#define SHT_PROGBITS 1
+#define SHT_SYMTAB 2
+#define SHT_STRTAB 3
+#define SHT_RELA 4
+#define SHT_HASH 5
+#define SHT_DYNAMIC 6
+#define SHT_NOTE 7
+#define SHT_NOBITS 8
+#define SHT_REL 9
+#define SHT_SHLIB 10
+#define SHT_DYNSYM 11
+#define SHT_NUM 12
+#define SHT_LOPROC 0x70000000
+#define SHT_HIPROC 0x7fffffff
+#define SHT_LOUSER 0x80000000
+#define SHT_HIUSER 0xffffffff
+
+/* sh_flags */
+#define SHF_WRITE 0x1
+#define SHF_ALLOC 0x2
+#define SHF_EXECINSTR 0x4
+#define SHF_MASKPROC 0xf0000000
+
+/* special section indexes */
+#define SHN_UNDEF 0
+#define SHN_LORESERVE 0xff00
+#define SHN_LOPROC 0xff00
+#define SHN_HIPROC 0xff1f
+#define SHN_ABS 0xfff1
+#define SHN_COMMON 0xfff2
+#define SHN_HIRESERVE 0xffff
+
+typedef struct {
+ Elf32_Word sh_name;
+ Elf32_Word sh_type;
+ Elf32_Word sh_flags;
+ Elf32_Addr sh_addr;
+ Elf32_Off sh_offset;
+ Elf32_Word sh_size;
+ Elf32_Word sh_link;
+ Elf32_Word sh_info;
+ Elf32_Word sh_addralign;
+ Elf32_Word sh_entsize;
+} Elf32_Shdr;
+
+typedef struct elf64_shdr {
+ Elf64_Word sh_name; /* Section name, index in string tbl */
+ Elf64_Word sh_type; /* Type of section */
+ Elf64_Xword sh_flags; /* Miscellaneous section attributes */
+ Elf64_Addr sh_addr; /* Section virtual addr at execution */
+ Elf64_Off sh_offset; /* Section file offset */
+ Elf64_Xword sh_size; /* Size of section in bytes */
+ Elf64_Word sh_link; /* Index of another section */
+ Elf64_Word sh_info; /* Additional section information */
+ Elf64_Xword sh_addralign; /* Section alignment */
+ Elf64_Xword sh_entsize; /* Entry size if section holds table */
+} Elf64_Shdr;
+
+#define EI_MAG0 0 /* e_ident[] indexes */
+#define EI_MAG1 1
+#define EI_MAG2 2
+#define EI_MAG3 3
+#define EI_CLASS 4
+#define EI_DATA 5
+#define EI_VERSION 6
+#define EI_OSABI 7
+#define EI_PAD 8
+
+#define ELFMAG0 0x7f /* EI_MAG */
+#define ELFMAG1 'E'
+#define ELFMAG2 'L'
+#define ELFMAG3 'F'
+#define ELFMAG "\177ELF"
+#define SELFMAG 4
+
+#define ELFCLASSNONE 0 /* EI_CLASS */
+#define ELFCLASS32 1
+#define ELFCLASS64 2
+#define ELFCLASSNUM 3
+
+#define ELFDATANONE 0 /* e_ident[EI_DATA] */
+#define ELFDATA2LSB 1
+#define ELFDATA2MSB 2
+
+#define EV_NONE 0 /* e_version, EI_VERSION */
+#define EV_CURRENT 1
+#define EV_NUM 2
+
+#define ELFOSABI_NONE 0
+#define ELFOSABI_LINUX 3
+
+#ifndef ELF_OSABI
+#define ELF_OSABI ELFOSABI_NONE
+#endif
+
+/* Notes used in ET_CORE */
+#define NT_PRSTATUS 1
+#define NT_PRFPREG 2
+#define NT_PRPSINFO 3
+#define NT_TASKSTRUCT 4
+#define NT_AUXV 6
+#define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */
+
+
+/* Note header in a PT_NOTE section */
+typedef struct elf32_note {
+ Elf32_Word n_namesz; /* Name size */
+ Elf32_Word n_descsz; /* Content size */
+ Elf32_Word n_type; /* Content type */
+} Elf32_Nhdr;
+
+/* Note header in a PT_NOTE section */
+typedef struct elf64_note {
+ Elf64_Word n_namesz; /* Name size */
+ Elf64_Word n_descsz; /* Content size */
+ Elf64_Word n_type; /* Content type */
+} Elf64_Nhdr;
+
+#if ELF_CLASS == ELFCLASS32
+
+extern Elf32_Dyn _DYNAMIC [];
+#define elfhdr elf32_hdr
+#define elf_phdr elf32_phdr
+#define elf_note elf32_note
+#define elf_addr_t Elf32_Off
+
+#else
+
+extern Elf64_Dyn _DYNAMIC [];
+#define elfhdr elf64_hdr
+#define elf_phdr elf64_phdr
+#define elf_note elf64_note
+#define elf_addr_t Elf64_Off
+
+#endif
+
+#ifndef ARCH_HAVE_EXTRA_ELF_NOTES
+static inline int arch_notes_size(void) { return 0; }
+static inline void arch_write_notes(struct file *file) { }
+
+#define ELF_CORE_EXTRA_NOTES_SIZE arch_notes_size()
+#define ELF_CORE_WRITE_EXTRA_NOTES arch_write_notes(file)
+#endif /* ARCH_HAVE_EXTRA_ELF_NOTES */
+
+#include <lwk/types.h>
+#include <lwk/idspace.h>
+#include <lwk/aspace.h>
+
+int elf_check_hdr(const struct elfhdr *hdr);
+void elf_print_elfhdr(const struct elfhdr *hdr);
+void elf_print_phdr(const struct elf_phdr *hdr);
+vmflags_t elf_pflags_to_vmflags(unsigned int elf_flags);
+vaddr_t elf_entry_point(const void *elf_image);
+vaddr_t elf_phdr_table_addr(const void *elf_image);
+unsigned int elf_num_phdrs(const void *elf_image);
+vaddr_t elf_heap_start(const void *elf_image);
+int elf_init_str_array(size_t size, char *ptrs[], char *str);
+paddr_t elf_dflt_alloc_pmem(size_t size, size_t alignment, uintptr_t arg);
+
+int
+elf_init_stack(
+ void * elf_image,
+ void * stack_mapping,
+ vaddr_t stack_start,
+ size_t stack_extent,
+ char * argv[],
+ char * envp[],
+ uid_t uid,
+ gid_t gid,
+ uint32_t hwcap,
+ vaddr_t * stack_ptr
+);
+
+int
+elf_load_executable(
+ void * elf_image,
+ paddr_t elf_image_paddr,
+ id_t aspace_id,
+ vmpagesize_t pagesz,
+ uintptr_t alloc_pmem_arg,
+ paddr_t (*alloc_pmem)(size_t size, size_t alignment, uintptr_t arg)
+);
+
+int
+elf_load(
+ void * elf_image,
+ paddr_t elf_image_paddr,
+ const char * name,
+ id_t desired_aspace_id,
+ vmpagesize_t pagesz,
+ size_t heap_size,
+ size_t stack_size,
+ char * argv_str,
+ char * envp_str,
+ start_state_t * start_state,
+ uintptr_t alloc_pmem_arg,
+ paddr_t (*alloc_pmem)(size_t size, size_t alignment, uintptr_t arg)
+);
+
+/**
+ * ELF related system calls.
+ */
+extern int elf_hwcap(id_t cpu, uint32_t *hwcap);
+
+#ifdef __KERNEL__
+extern int sys_elf_hwcap(id_t cpu, uint32_t __user *hwcap);
+#endif
+
+#endif /* _LWK_ELF_H */
--- /dev/null
+#ifndef _LWK_ERRNO_H
+#define _LWK_ERRNO_H
+
+#include <arch/errno.h>
+
+#ifdef __KERNEL__
+
+/* Should never be seen by user programs */
+#define ERESTARTSYS 512
+#define ERESTARTNOINTR 513
+#define ERESTARTNOHAND 514 /* restart if no handler.. */
+#define ENOIOCTLCMD 515 /* No ioctl command */
+#define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */
+
+/* Defined for the NFSv3 protocol */
+#define EBADHANDLE 521 /* Illegal NFS file handle */
+#define ENOTSYNC 522 /* Update synchronization mismatch */
+#define EBADCOOKIE 523 /* Cookie is stale */
+#define ENOTSUPP 524 /* Operation is not supported */
+#define ETOOSMALL 525 /* Buffer or request is too small */
+#define ESERVERFAULT 526 /* An untranslatable error occurred */
+#define EBADTYPE 527 /* Type not supported by server */
+#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */
+#define EIOCBQUEUED 529 /* iocb queued, will get completion event */
+#define EIOCBRETRY 530 /* iocb queued, will trigger a retry */
+
+#endif
+
+#endif
--- /dev/null
+#ifndef _LWK_EXTABLE_H
+#define _LWK_EXTABLE_H
+
+#include <lwk/init.h>
+#include <arch/extable.h>
+
+extern void __init
+sort_exception_table(void);
+
+extern const struct exception_table_entry *
+search_exception_table(unsigned long);
+
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *first,
+ const struct exception_table_entry *last,
+ unsigned long value);
+
+void
+sort_extable(struct exception_table_entry *start,
+ struct exception_table_entry *finish);
+
+#endif
--- /dev/null
+#ifndef _LINUX_HASH_H
+#define _LINUX_HASH_H
+/* Fast hashing routine for ints, longs and pointers.
+ (C) 2002 William Lee Irwin III, IBM */
+
+/*
+ * Knuth recommends primes in approximately golden ratio to the maximum
+ * integer representable by a machine word for multiplicative hashing.
+ * Chuck Lever verified the effectiveness of this technique:
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
+ *
+ * These primes are chosen to be bit-sparse, that is operations on
+ * them can use shifts and additions instead of multiplications for
+ * machines where multiplications are slow.
+ */
+
+#include <arch/types.h>
+
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
+/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
+#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
+
+#if BITS_PER_LONG == 32
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
+#define hash_long(val, bits) hash_32(val, bits)
+#elif BITS_PER_LONG == 64
+#define hash_long(val, bits) hash_64(val, bits)
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
+#else
+#error Wordsize not 32 or 64
+#endif
+
+static inline u64 hash_64(u64 val, unsigned int bits)
+{
+ u64 hash = val;
+
+ /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
+ u64 n = hash;
+ n <<= 18;
+ hash -= n;
+ n <<= 33;
+ hash -= n;
+ n <<= 3;
+ hash += n;
+ n <<= 3;
+ hash -= n;
+ n <<= 4;
+ hash += n;
+ n <<= 2;
+ hash += n;
+
+ /* High bits are more random, so use them. */
+ return hash >> (64 - bits);
+}
+
+static inline u32 hash_32(u32 val, unsigned int bits)
+{
+ /* On some cpus multiply is faster, on others gcc will do shifts */
+ u32 hash = val * GOLDEN_RATIO_PRIME_32;
+
+ /* High bits are more random, so use them. */
+ return hash >> (32 - bits);
+}
+
+static inline unsigned long hash_ptr(void *ptr, unsigned int bits)
+{
+ return hash_long((unsigned long)ptr, bits);
+}
+#endif /* _LINUX_HASH_H */
--- /dev/null
+/* Copyright (c) 2008, Sandia National Laboratories */
+
+#ifndef _LWK_HTABLE_H
+#define _LWK_HTABLE_H
+
+#include <lwk/idspace.h>
+
+/**
+ * Hash table object.
+ */
+typedef void * htable_t;
+
+/**
+ * Hash table API.
+ */
+extern int htable_create(size_t tbl_order,
+ size_t obj_key_offset, size_t obj_link_offset,
+ htable_t *tbl);
+extern int htable_destroy(htable_t tbl);
+extern int htable_add(htable_t tbl, void *obj);
+extern int htable_del(htable_t tbl, void *obj);
+extern void *htable_lookup(htable_t tbl, id_t key);
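+
+/*
+ * Example usage (an illustrative sketch, not part of the original header).
+ * It assumes tbl_order is the log2 of the bucket count and that each object
+ * embeds its id_t key and a link field of whatever type the implementation
+ * expects (a struct hlist_node is assumed here):
+ *
+ *	struct widget {
+ *		id_t              id;       // lookup key
+ *		struct hlist_node ht_link;  // hash table linkage
+ *	};
+ *
+ *	htable_t tbl;
+ *	htable_create(7,                             // assumed: 2^7 buckets
+ *	              offsetof(struct widget, id),
+ *	              offsetof(struct widget, ht_link),
+ *	              &tbl);
+ *	htable_add(tbl, w);                          // w is a struct widget *
+ *	struct widget *found = htable_lookup(tbl, w->id);
+ */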
+
+#endif
--- /dev/null
+/* Copyright (c) 2008, Sandia National Laboratories */
+
+#ifndef _LWK_IDSPACE_H
+#define _LWK_IDSPACE_H
+
+/**
+ * ID space object.
+ * An ID space consists of a range of IDs and keeps track of which are
+ * allocated and which are available for allocation.
+ */
+typedef void * idspace_t;
+
+/**
+ * Numeric identifier type.
+ */
+typedef unsigned int id_t;
+
+/**
+ * Used to request any available ID... pass as the 'request' arg to idspace_alloc_id().
+ */
+#define ANY_ID (-1)
+
+/**
+ * ID space API.
+ */
+int idspace_create(id_t min_id, id_t max_id, idspace_t *idspace);
+int idspace_destroy(idspace_t idspace);
+int idspace_alloc_id(idspace_t idspace, id_t request, id_t *id);
+int idspace_free_id(idspace_t idspace, id_t id);
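+
+/*
+ * Example usage (an illustrative sketch; return values are assumed to be 0
+ * on success and negative on error, and whether max_id is inclusive is left
+ * to the implementation):
+ *
+ *	idspace_t ids;
+ *	id_t my_id;
+ *
+ *	idspace_create(0, 4096, &ids);
+ *	idspace_alloc_id(ids, ANY_ID, &my_id);   // any available ID
+ *	...
+ *	idspace_free_id(ids, my_id);
+ *	idspace_destroy(ids);
+ */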
+
+#endif
--- /dev/null
+#ifndef _LWK_INIT_H
+#define _LWK_INIT_H
+
+#include <lwk/compiler.h>
+
+/* These macros are used to mark some functions or
+ * initialized data (doesn't apply to uninitialized data)
+ * as `initialization' functions. The kernel can take this
+ * as a hint that the function is used only during the initialization
+ * phase and free up the memory resources used by it afterwards.
+ *
+ * Usage:
+ * For functions:
+ *
+ * You should add __init immediately before the function name, like:
+ *
+ * static void __init initme(int x, int y)
+ * {
+ * extern int z; z = x * y;
+ * }
+ *
+ * If the function has a prototype somewhere, you can also add
+ * __init between closing brace of the prototype and semicolon:
+ *
+ * extern int initialize_foobar_device(int, int, int) __init;
+ *
+ * For initialized data:
+ * You should insert __initdata between the variable name and equal
+ * sign followed by value, e.g.:
+ *
+ * static int init_variable __initdata = 0;
+ * static char linux_logo[] __initdata = { 0x32, 0x36, ... };
+ *
+ * Don't forget to initialize data not at file scope, i.e. within a function,
+ * as gcc otherwise puts the data into the bss section and not into the init
+ * section.
+ *
+ * Also note, that this data cannot be "const".
+ */
+
+/* These are for everybody (although not all archs will actually
+ discard it in modules) */
+#define __init __attribute__ ((__section__ (".init.text")))
+#define __meminit __init
+#define __cpuinit __init
+#define __initdata __attribute__ ((__section__ (".init.data")))
+#define __exitdata __attribute__ ((__section__(".exit.data")))
+#define __exit_call __attribute_used__ __attribute__ ((__section__ (".exitcall.exit")))
+#define __exit __attribute_used__ __attribute__ ((__section__(".exit.text")))
+#define __cpuinitdata __initdata
+
+/* For assembly routines */
+#define __INIT .section ".init.text","ax"
+#define __FINIT .previous
+#define __INITDATA .section ".init.data","aw"
+
+/* TODO: move this */
+#define COMMAND_LINE_SIZE 1024
+
+#ifndef __ASSEMBLY__
+
+#include <lwk/types.h>
+
+/* Defined in init/main.c */
+extern char lwk_command_line[COMMAND_LINE_SIZE];
+
+/* used by init/main.c */
+extern void setup_arch(void);
+
+extern void start_kernel(void);
+
+extern int create_init_task(void);
+extern paddr_t init_elf_image;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _LWK_INIT_H */
--- /dev/null
+#ifndef _LWK_INIT_TASK_H
+#define _LWK_INIT_TASK_H
+
+/**
+ * Initializes architecture-independent fields in the initial address space.
+ */
+#define BOOTSTRAP_ASPACE(name)
+
+/**
+ * Initializes architecture-independent fields in the initial task structure.
+ */
+#define BOOTSTRAP_TASK(task_info) \
+ .id = 0, \
+ .name = "bootstrap", \
+ .cpu_id = 0, \
+ .aspace = &bootstrap_aspace, \
+ .sched_link = LIST_HEAD_INIT(task_info.sched_link), \
+
+#define bootstrap_task bootstrap_task_union.task_info
+#define bootstrap_stack bootstrap_task_union.stack
+
+#endif
--- /dev/null
+#ifndef _LWK_INTERRUPT_H
+#define _LWK_INTERRUPT_H
+
+/**
+ * IRQ handler return type.
+ *
+ * IRQ_NONE means we didn't handle the interrupt.
+ * IRQ_HANDLED means we did handle the interrupt.
+ * IRQ_RETVAL(x) returns IRQ_HANDLED if x is non-zero, IRQ_NONE otherwise.
+ */
+typedef int irqreturn_t;
+#define IRQ_NONE (0)
+#define IRQ_HANDLED (1)
+#define IRQ_RETVAL(x) ((x) != 0)
+
+/**
+ * IRQ handler prototype.
+ */
+typedef irqreturn_t (*irq_handler_t)(unsigned int irq, void *dev_id);
+
+/**
+ * Registers an interrupt handler.
+ */
+extern int request_irq(unsigned int irq,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id);
+
+/**
+ * Unregisters an interrupt handler.
+ */
+extern void free_irq(unsigned int irq, void *dev_id);
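+
+/*
+ * Example usage (an illustrative sketch; the IRQ number, device name and the
+ * device_raised_irq() helper are made up for the example):
+ *
+ *	static irqreturn_t my_dev_interrupt(unsigned int irq, void *dev_id)
+ *	{
+ *		struct my_dev *dev = dev_id;
+ *
+ *		if (!device_raised_irq(dev))     // hypothetical helper
+ *			return IRQ_NONE;         // not our interrupt
+ *		// ... service the device ...
+ *		return IRQ_HANDLED;
+ *	}
+ *
+ *	request_irq(5, my_dev_interrupt, 0, "my_dev", dev);
+ *	...
+ *	free_irq(5, dev);
+ */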
+
+#endif
--- /dev/null
+/* Rewritten and vastly simplified by Rusty Russell for in-kernel
+ * module loader:
+ * Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
+ */
+#ifndef _LWK_KALLSYMS_H
+#define _LWK_KALLSYMS_H
+
+#define KSYM_NAME_LEN 127
+
+unsigned long kallsyms_lookup_name(const char *name);
+
+const char *kallsyms_lookup(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset,
+ char *namebuf);
+
+#endif /* _LWK_KALLSYMS_H */
--- /dev/null
+#ifndef _LWK_KERNEL_H
+#define _LWK_KERNEL_H
+
+/*
+ * 'kernel.h' contains some often used function prototypes etc
+ */
+
+#include <stdarg.h>
+#include <lwk/linkage.h>
+#include <lwk/stddef.h>
+#include <lwk/types.h>
+#include <lwk/compiler.h>
+#include <lwk/kmem.h>
+#include <lwk/errno.h>
+#include <lwk/utsname.h>
+#include <lwk/print.h>
+#include <arch/byteorder.h>
+#include <arch/bug.h>
+
+extern const char lwk_banner[];
+extern struct utsname linux_utsname;
+
+#define INT_MAX ((int)(~0U>>1))
+#define INT_MIN (-INT_MAX - 1)
+#define UINT_MAX (~0U)
+#define LONG_MAX ((long)(~0UL>>1))
+#define LONG_MIN (-LONG_MAX - 1)
+#define ULONG_MAX (~0UL)
+
+#define STACK_MAGIC 0xdeadbeef
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+
+void panic(const char * fmt, ...);
+
+extern unsigned long simple_strtoul(const char *,char **,unsigned int);
+extern long simple_strtol(const char *,char **,unsigned int);
+extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
+extern long long simple_strtoll(const char *,char **,unsigned int);
+extern int get_option(char **str, int *pint);
+extern char *get_options(const char *str, int nints, int *ints);
+extern unsigned long long memparse(char *ptr, char **retptr);
+
+/*
+ * min()/max() macros that also do
+ * strict type-checking.. See the
+ * "unnecessary" pointer comparison.
+ */
+#define min(x,y) ({ \
+ typeof(x) _x = (x); \
+ typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x < _y ? _x : _y; })
+
+#define max(x,y) ({ \
+ typeof(x) _x = (x); \
+ typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x > _y ? _x : _y; })
+
+/*
+ * ..and if you can't take the strict
+ * types, you can specify one yourself.
+ *
+ * Or not use min/max at all, of course.
+ */
+#define min_t(type,x,y) \
+ ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
+#define max_t(type,x,y) \
+ ({ type __x = (x); type __y = (y); __x > __y ? __x: __y; })
+
+/*
+ * Round x up to the nearest y aligned boundary. y must be a power of two.
+ */
+#define round_up(x,y) (((x) + (y) - 1) & ~((y)-1))
+
+/*
+ * Round x down to the nearest y aligned boundary. y must be a power of two.
+ */
+#define round_down(x,y) ((x) & ~((y)-1))
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ */
+#define container_of(ptr, type, member) ({ \
+ const typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)( (char *)__mptr - offsetof(type,member) );})
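+
+/*
+ * Example (illustrative): given a pointer to an embedded member, recover a
+ * pointer to the enclosing structure:
+ *
+ *	struct task_struct {
+ *		// ...
+ *		struct list_head sched_link;
+ *	};
+ *
+ *	struct list_head *lp = ...;   // points at some task's sched_link
+ *	struct task_struct *tsk = container_of(lp, struct task_struct, sched_link);
+ */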
+
+/*
+ * Check at compile time that something is of a particular type.
+ * Always evaluates to 1 so you may use it easily in comparisons.
+ */
+#define typecheck(type,x) \
+({ type __dummy; \
+ typeof(x) __dummy2; \
+ (void)(&__dummy == &__dummy2); \
+ 1; \
+})
+
+/*
+ * Check at compile time that 'function' is a certain type, or is a pointer
+ * to that type (needs to use typedef for the function type.)
+ */
+#define typecheck_fn(type,function) \
+({ typeof(type) __tmp = function; \
+ (void)__tmp; \
+})
+
+/*
+ * Check at compile time that 'type' is a multiple of align.
+ */
+#define aligncheck(type,align) \
+ extern int __align_check[ (sizeof(type) % (align) == 0 ? 0 : 1/0) ]
+
+/*
+ * Check at compile time that the type 'type' has the expected 'size'.
+ * NOTE: 'type' must be a single word, so this doesn't work for structures.
+ * For structures, use sizecheck_struct().
+ */
+#define sizecheck(type,size) \
+ extern int __size_check_##type[ (sizeof(type) == (size) ? 0 : 1/0) ]
+
+/*
+ * Check at compile time that the structure 'name' has the expected size 'size'.
+ */
+#define sizecheck_struct(name,size) \
+ extern int __size_check_struct_##name[ (sizeof(struct name) == (size) ? 0 : 1/0) ]
+
+/*
+ * Force a compilation error if condition is true
+ */
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
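+
+/*
+ * Examples (illustrative) of the compile-time checks above; each one
+ * compiles only when its condition holds:
+ *
+ *	sizecheck(u32, 4);      // file scope: u32 must be exactly 4 bytes
+ *	aligncheck(u64, 8);     // file scope: sizeof(u64) must be a multiple of 8
+ *
+ *	void init_foo(void)
+ *	{
+ *		BUILD_BUG_ON(sizeof(long) < sizeof(void *));   // statement context
+ *	}
+ */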
+
+#endif
--- /dev/null
+/* Copyright (c) 2007, Sandia National Laboratories */
+
+#ifndef _LWK_KMEM_H
+#define _LWK_KMEM_H
+
+void kmem_create_zone(unsigned long base_addr, size_t size);
+void kmem_add_memory(unsigned long base_addr, size_t size);
+
+void *kmem_alloc(size_t size);
+void kmem_free(void *addr);
+
+void * kmem_get_pages(unsigned long order);
+void kmem_free_pages(void *addr, unsigned long order);
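+
+/*
+ * Example usage (an illustrative sketch; the 'order' argument is assumed to
+ * mean 2^order contiguous pages):
+ *
+ *	struct pmem_region *rgn = kmem_alloc(sizeof(*rgn));
+ *	if (!rgn)
+ *		return -ENOMEM;
+ *	// ...
+ *	kmem_free(rgn);
+ *
+ *	void *buf = kmem_get_pages(2);     // assumed: 4 contiguous pages
+ *	// ...
+ *	kmem_free_pages(buf, 2);
+ */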
+
+#endif
--- /dev/null
+../../user/liblwk/include/lwk/liblwk.h
\ No newline at end of file
--- /dev/null
+#ifndef _LWK_LINKAGE_H
+#define _LWK_LINKAGE_H
+
+#include <arch/linkage.h>
+
+#ifdef __cplusplus
+#define CPP_ASMLINKAGE extern "C"
+#else
+#define CPP_ASMLINKAGE
+#endif
+
+#ifndef asmlinkage
+#define asmlinkage CPP_ASMLINKAGE
+#endif
+
+#ifndef prevent_tail_call
+# define prevent_tail_call(ret) do { } while (0)
+#endif
+
+#ifndef __ALIGN
+#define __ALIGN .align 4,0x90
+#define __ALIGN_STR ".align 4,0x90"
+#endif
+
+#ifdef __ASSEMBLY__
+
+#define ALIGN __ALIGN
+#define ALIGN_STR __ALIGN_STR
+
+#ifndef ENTRY
+#define ENTRY(name) \
+ .globl name; \
+ ALIGN; \
+ name:
+#endif
+
+#define KPROBE_ENTRY(name) \
+ .section .kprobes.text, "ax"; \
+ ENTRY(name)
+
+#ifndef END
+#define END(name) \
+ .size name, .-name
+#endif
+
+#ifndef ENDPROC
+#define ENDPROC(name) \
+ .type name, @function; \
+ END(name)
+#endif
+
+#endif
+
+#define NORET_TYPE /**/
+#define ATTRIB_NORET __attribute__((noreturn))
+#define NORET_AND noreturn,
+
+#ifndef FASTCALL
+#define FASTCALL(x) x
+#define fastcall
+#endif
+
+#endif
--- /dev/null
+#ifndef _LWK_LINUX_COMPAT_H
+#define _LWK_LINUX_COMPAT_H
+
+/* Mostly Linux stuff that is not implemented or supported in LWK */
+
+#define EXPORT_SYMBOL(sym)
+#define EXPORT_SYMBOL_GPL(sym)
+#define EXPORT_SYMBOL_GPL_FUTURE(sym)
+#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
+
+#endif
--- /dev/null
+#ifndef _LWK_LIST_H
+#define _LWK_LIST_H
+
+#ifdef __KERNEL__
+
+#include <lwk/stddef.h>
+#include <lwk/prefetch.h>
+
+/*
+ * These are non-NULL pointers that will result in page faults
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+#define LIST_POISON1 ((void *) 0x00100100)
+#define LIST_POISON2 ((void *) 0x00200200)
+
+/*
+ * Simple doubly linked list implementation.
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
+
+static inline void INIT_LIST_HEAD(struct list_head *list)
+{
+ list->next = list;
+ list->prev = list;
+}
+
+static inline void list_head_init(struct list_head *list)
+{
+ INIT_LIST_HEAD(list);
+}
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_add(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+
+/**
+ * list_add - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+static inline void list_add(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head, head->next);
+}
+
+/**
+ * list_add_tail - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static inline void list_add_tail(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head->prev, head);
+}
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_del(struct list_head * prev, struct list_head * next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
+static inline void list_del(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ entry->next = LIST_POISON1;
+ entry->prev = LIST_POISON2;
+}
+
+/**
+ * list_del_init - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ */
+static inline void list_del_init(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ INIT_LIST_HEAD(entry);
+}
+
+/**
+ * list_move - delete from one list and add as another's head
+ * @list: the entry to move
+ * @head: the head that will precede our entry
+ */
+static inline void list_move(struct list_head *list, struct list_head *head)
+{
+ __list_del(list->prev, list->next);
+ list_add(list, head);
+}
+
+/**
+ * list_move_tail - delete from one list and add as another's tail
+ * @list: the entry to move
+ * @head: the head that will follow our entry
+ */
+static inline void list_move_tail(struct list_head *list,
+ struct list_head *head)
+{
+ __list_del(list->prev, list->next);
+ list_add_tail(list, head);
+}
+
+/**
+ * list_empty - tests whether a list is empty
+ * @head: the list to test.
+ */
+static inline int list_empty(const struct list_head *head)
+{
+ return head->next == head;
+}
+
+/**
+ * list_empty_careful - tests whether a list is
+ * empty _and_ checks that no other CPU might be
+ * in the process of still modifying either member
+ *
+ * NOTE: using list_empty_careful() without synchronization
+ * can only be safe if the only activity that can happen
+ * to the list entry is list_del_init(). Eg. it cannot be used
+ * if another CPU could re-list_add() it.
+ *
+ * @head: the list to test.
+ */
+static inline int list_empty_careful(const struct list_head *head)
+{
+ struct list_head *next = head->next;
+ return (next == head) && (next == head->prev);
+}
+
+static inline void __list_splice(struct list_head *list,
+ struct list_head *head)
+{
+ struct list_head *first = list->next;
+ struct list_head *last = list->prev;
+ struct list_head *at = head->next;
+
+ first->prev = head;
+ head->next = first;
+
+ last->next = at;
+ at->prev = last;
+}
+
+/**
+ * list_splice - join two lists
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ */
+static inline void list_splice(struct list_head *list, struct list_head *head)
+{
+ if (!list_empty(list))
+ __list_splice(list, head);
+}
+
+/**
+ * list_splice_init - join two lists and reinitialise the emptied list.
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ *
+ * The list at @list is reinitialised
+ */
+static inline void list_splice_init(struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list)) {
+ __list_splice(list, head);
+ INIT_LIST_HEAD(list);
+ }
+}
+
+/**
+ * list_entry - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+
+/**
+ * list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop counter.
+ * @head: the head for your list.
+ */
+#define list_for_each(pos, head) \
+ for (pos = (head)->next; prefetch(pos->next), pos != (head); \
+ pos = pos->next)
+
+/**
+ * __list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop counter.
+ * @head: the head for your list.
+ *
+ * This variant differs from list_for_each() in that it's the
+ * simplest possible list iteration code, no prefetching is done.
+ * Use this for code that knows the list to be very short (empty
+ * or 1 entry) most of the time.
+ */
+#define __list_for_each(pos, head) \
+ for (pos = (head)->next; pos != (head); pos = pos->next)
+
+/**
+ * list_for_each_prev - iterate over a list backwards
+ * @pos: the &struct list_head to use as a loop counter.
+ * @head: the head for your list.
+ */
+#define list_for_each_prev(pos, head) \
+ for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
+ pos = pos->prev)
+
+/**
+ * list_for_each_safe - iterate over a list safe against removal of list entry
+ * @pos: the &struct list_head to use as a loop counter.
+ * @n: another &struct list_head to use as temporary storage
+ * @head: the head for your list.
+ */
+#define list_for_each_safe(pos, n, head) \
+ for (pos = (head)->next, n = pos->next; pos != (head); \
+ pos = n, n = pos->next)
+
+/**
+ * list_for_each_entry - iterate over list of given type
+ * @pos: the type * to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry(pos, head, member) \
+ for (pos = list_entry((head)->next, typeof(*pos), member); \
+ prefetch(pos->member.next), &pos->member != (head); \
+ pos = list_entry(pos->member.next, typeof(*pos), member))
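+
+/*
+ * Example (an illustrative sketch) combining LIST_HEAD(), list_add_tail()
+ * and list_for_each_entry():
+ *
+ *	struct widget {
+ *		int              value;
+ *		struct list_head link;
+ *	};
+ *
+ *	static LIST_HEAD(widget_list);
+ *
+ *	void add_widget(struct widget *w)
+ *	{
+ *		list_add_tail(&w->link, &widget_list);    // enqueue at the tail
+ *	}
+ *
+ *	void print_widgets(void)
+ *	{
+ *		struct widget *w;
+ *		list_for_each_entry(w, &widget_list, link)
+ *			printk("widget %d\n", w->value);
+ *	}
+ */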
+
+/**
+ * list_for_each_entry_reverse - iterate backwards over list of given type.
+ * @pos: the type * to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_reverse(pos, head, member) \
+ for (pos = list_entry((head)->prev, typeof(*pos), member); \
+ prefetch(pos->member.prev), &pos->member != (head); \
+ pos = list_entry(pos->member.prev, typeof(*pos), member))
+
+/**
+ * list_prepare_entry - prepare a pos entry for use as a start point in
+ * list_for_each_entry_continue
+ * @pos: the type * to use as a start point
+ * @head: the head of the list
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_prepare_entry(pos, head, member) \
+ ((pos) ? : list_entry(head, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_continue - iterate over list of given type
+ * continuing after existing point
+ * @pos: the type * to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_continue(pos, head, member) \
+ for (pos = list_entry(pos->member.next, typeof(*pos), member); \
+ prefetch(pos->member.next), &pos->member != (head); \
+ pos = list_entry(pos->member.next, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_from - iterate over list of given type
+ * continuing from existing point
+ * @pos: the type * to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_from(pos, head, member) \
+ for (; prefetch(pos->member.next), &pos->member != (head); \
+ pos = list_entry(pos->member.next, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos: the type * to use as a loop counter.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_safe(pos, n, head, member) \
+ for (pos = list_entry((head)->next, typeof(*pos), member), \
+ n = list_entry(pos->member.next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
+/**
+ * list_for_each_entry_safe_continue - iterate over list of given type
+ * continuing after existing point safe against removal of list entry
+ * @pos: the type * to use as a loop counter.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_safe_continue(pos, n, head, member) \
+ for (pos = list_entry(pos->member.next, typeof(*pos), member), \
+ n = list_entry(pos->member.next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
+/**
+ * list_for_each_entry_safe_from - iterate over list of given type
+ * from existing point safe against removal of list entry
+ * @pos: the type * to use as a loop counter.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_safe_from(pos, n, head, member) \
+ for (n = list_entry(pos->member.next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
+/**
+ * list_for_each_entry_safe_reverse - iterate backwards over list of given type safe against
+ * removal of list entry
+ * @pos: the type * to use as a loop counter.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_safe_reverse(pos, n, head, member) \
+ for (pos = list_entry((head)->prev, typeof(*pos), member), \
+ n = list_entry(pos->member.prev, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.prev, typeof(*n), member))
+
+/*
+ * Double linked lists with a single pointer list head.
+ * Mostly useful for hash tables where the two pointer list head is
+ * too wasteful.
+ * You lose the ability to access the tail in O(1).
+ */
+
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+ h->next = NULL;
+ h->pprev = NULL;
+}
+
+static inline void hlist_node_init(struct hlist_node *h)
+{
+ INIT_HLIST_NODE(h);
+}
+
+static inline int hlist_unhashed(const struct hlist_node *h)
+{
+ return !h->pprev;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+ return !h->first;
+}
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
+ *pprev = next;
+ if (next)
+ next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->next = LIST_POISON1;
+ n->pprev = LIST_POISON2;
+}
+
+static inline void hlist_del_init(struct hlist_node *n)
+{
+ if (!hlist_unhashed(n)) {
+ __hlist_del(n);
+ INIT_HLIST_NODE(n);
+ }
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+ n->next = first;
+ if (first)
+ first->pprev = &n->next;
+ h->first = n;
+ n->pprev = &h->first;
+}
+
+/* next must be != NULL */
+static inline void hlist_add_before(struct hlist_node *n,
+ struct hlist_node *next)
+{
+ n->pprev = next->pprev;
+ n->next = next;
+ next->pprev = &n->next;
+ *(n->pprev) = n;
+}
+
+static inline void hlist_add_after(struct hlist_node *n,
+ struct hlist_node *next)
+{
+ next->next = n->next;
+ n->next = next;
+ next->pprev = &n->next;
+
+ if(next->next)
+ next->next->pprev = &next->next;
+}
+
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define hlist_for_each(pos, head) \
+ for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
+ pos = pos->next)
+
+#define hlist_for_each_safe(pos, n, head) \
+ for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
+ pos = n)
+
+/**
+ * hlist_for_each_entry - iterate over list of given type
+ * @tpos: the type * to use as a loop counter.
+ * @pos: the &struct hlist_node to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry(tpos, pos, head, member) \
+ for (pos = (head)->first; \
+ pos && ({ prefetch(pos->next); 1;}) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = pos->next)
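+
+/*
+ * Example (an illustrative sketch): a small hash table built from hlist_head
+ * buckets, using hash_long() from the hash header to pick a bucket:
+ *
+ *	#define MY_HASH_BITS 6
+ *	static struct hlist_head my_table[1 << MY_HASH_BITS];
+ *
+ *	struct entry {
+ *		unsigned long     key;
+ *		struct hlist_node link;
+ *	};
+ *
+ *	void insert_entry(struct entry *e)
+ *	{
+ *		hlist_add_head(&e->link, &my_table[hash_long(e->key, MY_HASH_BITS)]);
+ *	}
+ *
+ *	struct entry *find_entry(unsigned long key)
+ *	{
+ *		struct entry *e;
+ *		struct hlist_node *pos;
+ *
+ *		hlist_for_each_entry(e, pos, &my_table[hash_long(key, MY_HASH_BITS)], link)
+ *			if (e->key == key)
+ *				return e;
+ *		return NULL;
+ *	}
+ */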
+
+/**
+ * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
+ * @tpos: the type * to use as a loop counter.
+ * @pos: the &struct hlist_node to use as a loop counter.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_continue(tpos, pos, member) \
+ for (pos = (pos)->next; \
+ pos && ({ prefetch(pos->next); 1;}) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = pos->next)
+
+/**
+ * hlist_for_each_entry_from - iterate over a hlist continuing from existing point
+ * @tpos: the type * to use as a loop counter.
+ * @pos: the &struct hlist_node to use as a loop counter.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_from(tpos, pos, member) \
+ for (; pos && ({ prefetch(pos->next); 1;}) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = pos->next)
+
+/**
+ * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @tpos: the type * to use as a loop counter.
+ * @pos: the &struct hlist_node to use as a loop counter.
+ * @n: another &struct hlist_node to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
+ for (pos = (head)->first; \
+ pos && ({ n = pos->next; 1; }) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = n)
+
+#else
+#warning "don't include kernel headers in userspace"
+#endif /* __KERNEL__ */
+#endif
--- /dev/null
+/* Integer base 2 logarithm calculation
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LWK_LOG2_H
+#define _LWK_LOG2_H
+
+#include <lwk/types.h>
+#include <lwk/bitops.h>
+
+/*
+ * deal with unrepresentable constant logarithms
+ */
+extern __attribute__((const, noreturn))
+int ____ilog2_NaN(void);
+
+/*
+ * non-constant log of base 2 calculators
+ * - the arch may override these in asm/bitops.h if they can be implemented
+ * more efficiently than using fls() and fls64()
+ * - the arch is not required to handle n==0 if implementing the fallback
+ */
+#ifndef CONFIG_ARCH_HAS_ILOG2_U32
+static inline __attribute__((const))
+int __ilog2_u32(u32 n)
+{
+ return fls(n) - 1;
+}
+#endif
+
+#ifndef CONFIG_ARCH_HAS_ILOG2_U64
+static inline __attribute__((const))
+int __ilog2_u64(u64 n)
+{
+ return fls64(n) - 1;
+}
+#endif
+
+/*
+ * Determine whether some value is a power of two, where zero is
+ * *not* considered a power of two.
+ */
+
+static inline __attribute__((const))
+bool is_power_of_2(unsigned long n)
+{
+ return (n != 0 && ((n & (n - 1)) == 0));
+}
+
+/*
+ * round up to nearest power of two
+ */
+static inline __attribute__((const))
+unsigned long __roundup_pow_of_two(unsigned long n)
+{
+ return 1UL << fls_long(n - 1);
+}
+
+/**
+ * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
+ * @n - parameter
+ *
+ * constant-capable log of base 2 calculation
+ * - this can be used to initialise global variables from constant data, hence
+ * the massive ternary operator construction
+ *
+ * selects the appropriately-sized optimised version depending on sizeof(n)
+ */
+#define ilog2(n) \
+( \
+ __builtin_constant_p(n) ? ( \
+ (n) < 1 ? ____ilog2_NaN() : \
+ (n) & (1ULL << 63) ? 63 : \
+ (n) & (1ULL << 62) ? 62 : \
+ (n) & (1ULL << 61) ? 61 : \
+ (n) & (1ULL << 60) ? 60 : \
+ (n) & (1ULL << 59) ? 59 : \
+ (n) & (1ULL << 58) ? 58 : \
+ (n) & (1ULL << 57) ? 57 : \
+ (n) & (1ULL << 56) ? 56 : \
+ (n) & (1ULL << 55) ? 55 : \
+ (n) & (1ULL << 54) ? 54 : \
+ (n) & (1ULL << 53) ? 53 : \
+ (n) & (1ULL << 52) ? 52 : \
+ (n) & (1ULL << 51) ? 51 : \
+ (n) & (1ULL << 50) ? 50 : \
+ (n) & (1ULL << 49) ? 49 : \
+ (n) & (1ULL << 48) ? 48 : \
+ (n) & (1ULL << 47) ? 47 : \
+ (n) & (1ULL << 46) ? 46 : \
+ (n) & (1ULL << 45) ? 45 : \
+ (n) & (1ULL << 44) ? 44 : \
+ (n) & (1ULL << 43) ? 43 : \
+ (n) & (1ULL << 42) ? 42 : \
+ (n) & (1ULL << 41) ? 41 : \
+ (n) & (1ULL << 40) ? 40 : \
+ (n) & (1ULL << 39) ? 39 : \
+ (n) & (1ULL << 38) ? 38 : \
+ (n) & (1ULL << 37) ? 37 : \
+ (n) & (1ULL << 36) ? 36 : \
+ (n) & (1ULL << 35) ? 35 : \
+ (n) & (1ULL << 34) ? 34 : \
+ (n) & (1ULL << 33) ? 33 : \
+ (n) & (1ULL << 32) ? 32 : \
+ (n) & (1ULL << 31) ? 31 : \
+ (n) & (1ULL << 30) ? 30 : \
+ (n) & (1ULL << 29) ? 29 : \
+ (n) & (1ULL << 28) ? 28 : \
+ (n) & (1ULL << 27) ? 27 : \
+ (n) & (1ULL << 26) ? 26 : \
+ (n) & (1ULL << 25) ? 25 : \
+ (n) & (1ULL << 24) ? 24 : \
+ (n) & (1ULL << 23) ? 23 : \
+ (n) & (1ULL << 22) ? 22 : \
+ (n) & (1ULL << 21) ? 21 : \
+ (n) & (1ULL << 20) ? 20 : \
+ (n) & (1ULL << 19) ? 19 : \
+ (n) & (1ULL << 18) ? 18 : \
+ (n) & (1ULL << 17) ? 17 : \
+ (n) & (1ULL << 16) ? 16 : \
+ (n) & (1ULL << 15) ? 15 : \
+ (n) & (1ULL << 14) ? 14 : \
+ (n) & (1ULL << 13) ? 13 : \
+ (n) & (1ULL << 12) ? 12 : \
+ (n) & (1ULL << 11) ? 11 : \
+ (n) & (1ULL << 10) ? 10 : \
+ (n) & (1ULL << 9) ? 9 : \
+ (n) & (1ULL << 8) ? 8 : \
+ (n) & (1ULL << 7) ? 7 : \
+ (n) & (1ULL << 6) ? 6 : \
+ (n) & (1ULL << 5) ? 5 : \
+ (n) & (1ULL << 4) ? 4 : \
+ (n) & (1ULL << 3) ? 3 : \
+ (n) & (1ULL << 2) ? 2 : \
+ (n) & (1ULL << 1) ? 1 : \
+ (n) & (1ULL << 0) ? 0 : \
+ ____ilog2_NaN() \
+ ) : \
+ (sizeof(n) <= 4) ? \
+ __ilog2_u32(n) : \
+ __ilog2_u64(n) \
+ )
+
+/**
+ * roundup_pow_of_two - round the given value up to nearest power of two
+ * @n - parameter
+ *
+ * round the given value up to the nearest power of two
+ * - the result is undefined when n == 0
+ * - this can be used to initialise global variables from constant data
+ */
+#define roundup_pow_of_two(n) \
+( \
+ __builtin_constant_p(n) ? ( \
+ (n == 1) ? 1 : \
+ (1UL << (ilog2((n) - 1) + 1)) \
+ ) : \
+ __roundup_pow_of_two(n) \
+ )
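+
+/*
+ * Examples (illustrative); both forms may appear in constant initialisers
+ * when the argument is itself a compile-time constant:
+ *
+ *	ilog2(1)                 == 0
+ *	ilog2(256)               == 8
+ *	is_power_of_2(96)        == false
+ *	roundup_pow_of_two(100)  == 128
+ *
+ *	static unsigned long tbl_order = ilog2(NR_CPUS);   // constant initialiser,
+ *	                                                    // assuming NR_CPUS is a
+ *	                                                    // compile-time constant
+ */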
+
+#endif /* _LWK_LOG2_H */
--- /dev/null
+#ifndef _LWK_PARAMS_H
+#define _LWK_PARAMS_H
+/* (C) Copyright 2001, 2002 Rusty Russell IBM Corporation */
+#include <lwk/init.h>
+#include <lwk/stringify.h>
+#include <lwk/kernel.h>
+
+struct kernel_param;
+
+/* Returns 0, or -errno. arg is in kp->arg. */
+typedef int (*param_set_fn)(const char *val, struct kernel_param *kp);
+/* Returns length written or -errno. Buffer is 4k (ie. be short!) */
+typedef int (*param_get_fn)(char *buffer, struct kernel_param *kp);
+
+struct kernel_param {
+ const char * name;
+ param_set_fn set;
+ param_get_fn get;
+ void * arg;
+};
+
+/* Special one for strings we want to copy into */
+struct kparam_string {
+ unsigned int maxlen;
+ char * string;
+};
+
+/* Special one for arrays */
+struct kparam_array {
+ unsigned int max;
+ unsigned int * num;
+ param_set_fn set;
+ param_get_fn get;
+ unsigned int elemsize;
+ void * elem;
+};
+
+/* This is the fundamental function for registering kernel parameters. */
+#define __param_call(prefix, name, set, get, arg) \
+ static char __param_str_##name[] = prefix #name; \
+ static struct kernel_param const __param_##name \
+ __attribute_used__ \
+ __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
+ = { __param_str_##name, set, get, arg }
+
+/* Helper functions: type is byte, short, ushort, int, uint, long,
+ ulong, charp, bool or invbool, or XXX if you define param_get_XXX,
+ param_set_XXX and param_check_XXX. */
+#define __param_named(prefix, name, value, type) \
+ param_check_##type(name, &(value)); \
+ __param_call(prefix, name, param_set_##type, param_get_##type, &value)
+
+/* Actually copy string: maxlen param is usually sizeof(string). */
+#define __param_string(prefix, name, string, len) \
+ static struct kparam_string __param_string_##name \
+ = { len, string }; \
+ __param_call(prefix, name, param_set_copystring, \
+ param_get_string, &__param_string_##name)
+
+/* Comma-separated array: *nump is set to number they actually specified. */
+#define __param_array_named(prefix, name, array, type, nump) \
+ static struct kparam_array __param_arr_##name \
+ = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
+ sizeof(array[0]), array }; \
+ __param_call(prefix, name, param_array_set, param_array_get, \
+ &__param_arr_##name)
+
+/* Call a function to parse the parameter */
+#define __param_func(prefix, name, func) \
+ __param_call(prefix, name, func, NULL, NULL)
+
+/*
+ * Basic parameter interface. These are just raw... they have no prefix.
+ */
+#define param(name, type) \
+ __param_named("", name, name, type)
+
+#define param_named(name, value, type) \
+ __param_named("", name, value, type)
+
+#define param_string(name, string, len) \
+ __param_string("", name, string, len)
+
+#define param_array(name, type, nump) \
+ __param_array_named("", name, name, type, nump)
+
+#define param_array_named(name, array, type, nump) \
+ __param_array_named("", name, array, type, nump)
+
+#define param_func(name, func) \
+ __param_func("", name, func)
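+
+/*
+ * Example usage (an illustrative sketch; the parameter names are made up).
+ * With these declarations, boot arguments of the form "verbose_level=3
+ * console_name=serial" would be handled by parse_args()/parse_params():
+ *
+ *	static int verbose_level = 1;
+ *	param(verbose_level, int);
+ *
+ *	static char console_name[16] = "vga";
+ *	param_string(console_name, console_name, sizeof(console_name));
+ */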
+
+/* Called at kernel boot */
+extern int parse_args(const char *name,
+ char *args,
+ struct kernel_param *params,
+ unsigned num,
+ int (*unknown)(char *param, char *val));
+
+/* All the helper functions */
+/* The macros to do compile-time type checking stolen from Jakub
+ Jelinek, who IIRC came up with this idea for the 2.4 module init code. */
+#define __param_check(name, p, type) \
+ static inline type *__check_##name(void) { return(p); }
+
+extern int param_set_byte(const char *val, struct kernel_param *kp);
+extern int param_get_byte(char *buffer, struct kernel_param *kp);
+#define param_check_byte(name, p) __param_check(name, p, unsigned char)
+
+extern int param_set_short(const char *val, struct kernel_param *kp);
+extern int param_get_short(char *buffer, struct kernel_param *kp);
+#define param_check_short(name, p) __param_check(name, p, short)
+
+extern int param_set_ushort(const char *val, struct kernel_param *kp);
+extern int param_get_ushort(char *buffer, struct kernel_param *kp);
+#define param_check_ushort(name, p) __param_check(name, p, unsigned short)
+
+extern int param_set_int(const char *val, struct kernel_param *kp);
+extern int param_get_int(char *buffer, struct kernel_param *kp);
+#define param_check_int(name, p) __param_check(name, p, int)
+
+extern int param_set_uint(const char *val, struct kernel_param *kp);
+extern int param_get_uint(char *buffer, struct kernel_param *kp);
+#define param_check_uint(name, p) __param_check(name, p, unsigned int)
+
+extern int param_set_long(const char *val, struct kernel_param *kp);
+extern int param_get_long(char *buffer, struct kernel_param *kp);
+#define param_check_long(name, p) __param_check(name, p, long)
+
+extern int param_set_ulong(const char *val, struct kernel_param *kp);
+extern int param_get_ulong(char *buffer, struct kernel_param *kp);
+#define param_check_ulong(name, p) __param_check(name, p, unsigned long)
+
+extern int param_set_charp(const char *val, struct kernel_param *kp);
+extern int param_get_charp(char *buffer, struct kernel_param *kp);
+#define param_check_charp(name, p) __param_check(name, p, char *)
+
+extern int param_set_bool(const char *val, struct kernel_param *kp);
+extern int param_get_bool(char *buffer, struct kernel_param *kp);
+#define param_check_bool(name, p) __param_check(name, p, int)
+
+extern int param_set_invbool(const char *val, struct kernel_param *kp);
+extern int param_get_invbool(char *buffer, struct kernel_param *kp);
+#define param_check_invbool(name, p) __param_check(name, p, int)
+
+extern int param_array_set(const char *val, struct kernel_param *kp);
+extern int param_array_get(char *buffer, struct kernel_param *kp);
+
+extern int param_set_copystring(const char *val, struct kernel_param *kp);
+extern int param_get_string(char *buffer, struct kernel_param *kp);
+
+extern int parse_params(const char *str);
+extern int param_set_by_name_int(char *param, int val);
+
+/*
+ * These two symbols are defined by the platform's linker script.
+ * They surround a table of kernel parameter descriptors. This table
+ * is used by the command line parser to determine how each argument
+ * should be handled... each encountered argument causes a search of
+ * this table.
+ */
+extern struct kernel_param __start___param[], __stop___param[];
+
+#endif /* _LWK_PARAMS_H */
--- /dev/null
+#ifndef _LWK_PERCPU_H
+#define _LWK_PERCPU_H
+#include <lwk/smp.h>
+#include <lwk/string.h>
+#include <arch/percpu.h>
+
+/* Enough to cover all DEFINE_PER_CPU()'s in kernel. */
+#ifndef PERCPU_ENOUGH_ROOM
+#define PERCPU_ENOUGH_ROOM 32768
+#endif
+
+/* Must be an lvalue. */
+#define get_cpu_var(var) __get_cpu_var(var)
+#define put_cpu_var(var)
+
+struct percpu_data {
+ void *ptrs[NR_CPUS];
+};
+
+/*
+ * Use this to get to a cpu's version of the per-cpu object allocated using
+ * alloc_percpu. Non-atomic access to the current CPU's version should
+ * probably be combined with get_cpu()/put_cpu().
+ */
+#define per_cpu_ptr(ptr, cpu) \
+({ \
+ struct percpu_data *__p = (struct percpu_data *)~(unsigned long)(ptr); \
+ (__typeof__(ptr))__p->ptrs[(cpu)]; \
+})
+
+extern void *__alloc_percpu(size_t size);
+extern void free_percpu(const void *);
+
+/* Simple wrapper for the common case: zeros memory. */
+#define alloc_percpu(type) ((type *)(__alloc_percpu(sizeof(type))))
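+
+/*
+ * Example usage (an illustrative sketch; this_cpu_id is a stand-in for
+ * however the caller obtains its own CPU id):
+ *
+ *	long *counters = alloc_percpu(long);
+ *	long total = 0;
+ *	int cpu;
+ *
+ *	++*per_cpu_ptr(counters, this_cpu_id);      // bump this CPU's copy
+ *
+ *	for (cpu = 0; cpu < NR_CPUS; cpu++)         // sum all copies
+ *		total += *per_cpu_ptr(counters, cpu);
+ *
+ *	free_percpu(counters);
+ */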
+
+extern void setup_per_cpu_areas(void);
+
+#endif /* _LWK_PERCPU_H */
--- /dev/null
+#ifndef _LWK_PFN_H
+#define _LWK_PFN_H
+
+#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
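+
+/*
+ * Examples (illustrative, assuming 4 KB pages, i.e. PAGE_SHIFT == 12):
+ *
+ *	PFN_DOWN(0x3fff) == 0x3      // frame containing the address
+ *	PFN_UP(0x3001)   == 0x4      // first frame at or above the address
+ *	PFN_PHYS(0x4)    == 0x4000   // physical address of frame 4
+ */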
+
+#endif
--- /dev/null
+/* Copyright (c) 2008, Sandia National Laboratories */
+
+#ifndef _LWK_PMEM_H
+#define _LWK_PMEM_H
+
+/**
+ * Physical memory types.
+ */
+typedef enum {
+ PMEM_TYPE_BOOTMEM = 0, /* memory allocated at boot-time */
+ PMEM_TYPE_BIGPHYSAREA = 1, /* memory set-aside for a device/driver */
+ PMEM_TYPE_INITRD = 2, /* initrd image provided by bootloader */
+ PMEM_TYPE_INIT_TASK = 3, /* memory used by the initial task */
+ PMEM_TYPE_KMEM = 4, /* memory managed by the kernel */
+ PMEM_TYPE_UMEM = 5, /* memory managed by user-space */
+} pmem_type_t;
+
+/**
+ * Defines a physical memory region.
+ */
+struct pmem_region {
+ paddr_t start; /* region occupies: [start, end) */
+ paddr_t end;
+
+ bool type_is_set; /* type field is set? */
+ pmem_type_t type; /* physical memory type */
+
+ bool lgroup_is_set; /* lgroup field is set? */
+ lgroup_t lgroup; /* locality group region is in */
+
+ bool allocated_is_set; /* allocated field set? */
+ bool allocated; /* region is allocated? */
+
+ bool name_is_set; /* name field is set? */
+ char name[32]; /* human-readable name of region */
+
+};
+
+/**
+ * Core physical memory management functions.
+ */
+int pmem_add(const struct pmem_region *rgn);
+int pmem_update(const struct pmem_region *update);
+int pmem_query(const struct pmem_region *query, struct pmem_region *result);
+int pmem_alloc(size_t size, size_t alignment,
+ const struct pmem_region *constraint,
+ struct pmem_region *result);
+
+/**
+ * Convenience functions.
+ */
+void pmem_region_unset_all(struct pmem_region *rgn);
+const char *pmem_type_to_string(pmem_type_t type);
+int pmem_alloc_umem(size_t size, size_t alignment, struct pmem_region *rgn);
+bool pmem_is_umem(paddr_t start, size_t extent);
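+
+/*
+ * Example usage (an illustrative sketch; return values are assumed to be 0
+ * on success):
+ *
+ *	struct pmem_region rgn;
+ *
+ *	if (pmem_alloc_umem(1 << 20, PAGE_SIZE, &rgn) != 0)
+ *		return -ENOMEM;
+ *	// [rgn.start, rgn.end) now describes 1 MB of user-managed memory
+ *
+ *	if (!pmem_is_umem(rgn.start, 1 << 20))
+ *		panic("freshly allocated UMEM region not marked UMEM");
+ */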
+
+#ifdef __KERNEL__
+
+/**
+ * System call handlers.
+ */
+int sys_pmem_add(const struct pmem_region __user * rgn);
+int sys_pmem_update(const struct pmem_region __user * update);
+int sys_pmem_query(const struct pmem_region __user * query,
+ struct pmem_region __user * result);
+int sys_pmem_alloc(size_t size, size_t alignment,
+ const struct pmem_region __user *constraint,
+ struct pmem_region __user *result);
+
+#endif
+#endif
--- /dev/null
+#ifndef _LWK_POSIX_TYPES_H
+#define _LWK_POSIX_TYPES_H
+
+#include <lwk/stddef.h>
+
+/*
+ * This allows for 1024 file descriptors: if NR_OPEN is ever grown
+ * beyond that you'll have to change this too. But 1024 fd's seem to be
+ * enough even for such "real" unices like OSF/1, so hopefully this is
+ * one limit that doesn't have to be changed [again].
+ *
+ * Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in
+ * <sys/time.h> (and thus <linux/time.h>) - but this is a more logical
+ * place for them. Solved by having dummy defines in <sys/time.h>.
+ */
+
+/*
+ * Those macros may have been defined in <gnu/types.h>. But we always
+ * use the ones here.
+ */
+#undef __NFDBITS
+#define __NFDBITS (8 * sizeof(unsigned long))
+
+#undef __FD_SETSIZE
+#define __FD_SETSIZE 1024
+
+#undef __FDSET_LONGS
+#define __FDSET_LONGS (__FD_SETSIZE/__NFDBITS)
+
+#undef __FDELT
+#define __FDELT(d) ((d) / __NFDBITS)
+
+#undef __FDMASK
+#define __FDMASK(d) (1UL << ((d) % __NFDBITS))
+
+typedef struct {
+ unsigned long fds_bits [__FDSET_LONGS];
+} __kernel_fd_set;
+
+/* Type of a signal handler. */
+typedef void (*__kernel_sighandler_t)(int);
+
+/* Type of a SYSV IPC key. */
+typedef int __kernel_key_t;
+typedef int __kernel_mqd_t;
+
+#include <arch/posix_types.h>
+
+#endif /* _LWK_POSIX_TYPES_H */
--- /dev/null
+/*
+ * Generic cache management functions. Everything is arch-specific,
+ * but this header exists to make sure the defines/functions can be
+ * used in a generic way.
+ *
+ * 2000-11-13 Arjan van de Ven <arjan@fenrus.demon.nl>
+ *
+ */
+
+#ifndef _LWK_PREFETCH_H
+#define _LWK_PREFETCH_H
+
+#include <lwk/types.h>
+#include <arch/processor.h>
+#include <arch/cache.h>
+
+/*
+ prefetch(x) attempts to pre-emptively get the memory pointed to
+ by address "x" into the CPU L1 cache.
+ prefetch(x) should not cause any kind of exception, prefetch(0) is
+ specifically ok.
+
+ prefetch() should be defined by the architecture, if not, the
+ #define below provides a no-op define.
+
+ There are 3 prefetch() macros:
+
+ prefetch(x) - prefetches the cacheline at "x" for read
+ prefetchw(x) - prefetches the cacheline at "x" for write
+ spin_lock_prefetch(x) - prefetches the spinlock *x for taking
+
+ there is also PREFETCH_STRIDE which is the architecture-preferred
+ "lookahead" size for prefetching streamed operations.
+
+*/
+
+/*
+ * These cannot be do{}while(0) macros. See the mental gymnastics in
+ * the loop macro.
+ */
+
+#ifndef ARCH_HAS_PREFETCH
+static inline void prefetch(const void *x) {;}
+#endif
+
+#ifndef ARCH_HAS_PREFETCHW
+static inline void prefetchw(const void *x) {;}
+#endif
+
+#ifndef ARCH_HAS_SPINLOCK_PREFETCH
+#define spin_lock_prefetch(x) prefetchw(x)
+#endif
+
+#ifndef PREFETCH_STRIDE
+#define PREFETCH_STRIDE (4*L1_CACHE_BYTES)
+#endif
+
+static inline void prefetch_range(void *addr, size_t len)
+{
+#ifdef ARCH_HAS_PREFETCH
+ char *cp;
+ char *end = addr + len;
+
+ for (cp = addr; cp < end; cp += PREFETCH_STRIDE)
+ prefetch(cp);
+#endif
+}
+
+#endif
--- /dev/null
+#ifndef _LWK_PRINT_H
+#define _LWK_PRINT_H
+
+#define KERN_EMERG "<0>" /* system is unusable */
+#define KERN_ALERT "<1>" /* action must be taken immediately */
+#define KERN_CRIT "<2>" /* critical conditions */
+#define KERN_ERR "<3>" /* error conditions */
+#define KERN_WARNING "<4>" /* warning conditions */
+#define KERN_NOTICE "<5>" /* normal but significant condition */
+#define KERN_INFO "<6>" /* informational */
+#define KERN_DEBUG "<7>" /* debug-level messages */
+#define KERN_USERMSG "<8>" /* message from user-space */
+#define KERN_NORM "<9>" /* a "normal" message, nothing special */
+
+#define USER_EMERG "<0>" /* system is unusable */
+#define USER_ALERT "<1>" /* action must be taken immediately */
+#define USER_CRIT "<2>" /* critical conditions */
+#define USER_ERR "<3>" /* error conditions */
+#define USER_WARNING "<4>" /* warning conditions */
+#define USER_NOTICE "<5>" /* normal but significant condition */
+#define USER_INFO "<6>" /* informational */
+#define USER_DEBUG "<7>" /* debug-level messages */
+#define USER_USERMSG "<8>" /* message from user-space */
+#define USER_NORM "" /* a "normal" message, nothing special */
+
+#ifdef __KERNEL__
+#define TYPE_EMERG KERN_EMERG
+#define TYPE_ALERT KERN_ALERT
+#define TYPE_CRIT KERN_CRIT
+#define TYPE_ERR KERN_ERR
+#define TYPE_WARNING KERN_WARNING
+#define TYPE_NOTICE KERN_NOTICE
+#define TYPE_INFO KERN_INFO
+#define TYPE_DEBUG KERN_DEBUG
+#define TYPE_USERMSG KERN_USERMSG
+#define TYPE_NORM KERN_NORM
+#else
+#define TYPE_EMERG USER_EMERG
+#define TYPE_ALERT USER_ALERT
+#define TYPE_CRIT USER_CRIT
+#define TYPE_ERR USER_ERR
+#define TYPE_WARNING USER_WARNING
+#define TYPE_NOTICE USER_NOTICE
+#define TYPE_INFO USER_INFO
+#define TYPE_DEBUG USER_DEBUG
+#define TYPE_USERMSG USER_USERMSG
+#define TYPE_NORM USER_NORM
+#endif
+
+#ifdef __KERNEL__
+#include <stdarg.h>
+#include <lwk/types.h>
+extern int sprintf(char * buf, const char * fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+extern int vsprintf(char *buf, const char *, va_list)
+ __attribute__ ((format (printf, 2, 0)));
+extern int snprintf(char * buf, size_t size, const char * fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
+extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
+ __attribute__ ((format (printf, 3, 0)));
+extern int scnprintf(char * buf, size_t size, const char * fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
+extern int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
+ __attribute__ ((format (printf, 3, 0)));
+extern int vprintk(const char *fmt, va_list args)
+ __attribute__ ((format (printf, 1, 0)));
+extern int printk(const char * fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+#define print printk
+#else
+#include <stdio.h>
+#include <stdarg.h>
+#define print printf
+#endif
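+
+/*
+ * Example usage (illustrative): print() resolves to printk() in the kernel
+ * and printf() in user space; the TYPE_* prefixes select a severity level
+ * (how the prefix is consumed is up to the message sink):
+ *
+ *	printk(KERN_INFO "cpu %u online\n", cpu);
+ *	print(TYPE_ERR "allocation of %lu bytes failed\n", (unsigned long)size);
+ */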
+
+#endif
--- /dev/null
+#ifndef _LWK_PTRACE_H
+#define _LWK_PTRACE_H
+/* ptrace.h */
+/* structs and defines to help the user use the ptrace system call. */
+
+/* has the defines to get at the registers. */
+
+#define PTRACE_TRACEME 0
+#define PTRACE_PEEKTEXT 1
+#define PTRACE_PEEKDATA 2
+#define PTRACE_PEEKUSR 3
+#define PTRACE_POKETEXT 4
+#define PTRACE_POKEDATA 5
+#define PTRACE_POKEUSR 6
+#define PTRACE_CONT 7
+#define PTRACE_KILL 8
+#define PTRACE_SINGLESTEP 9
+
+#define PTRACE_ATTACH 0x10
+#define PTRACE_DETACH 0x11
+
+#define PTRACE_SYSCALL 24
+
+/* 0x4200-0x4300 are reserved for architecture-independent additions. */
+#define PTRACE_SETOPTIONS 0x4200
+#define PTRACE_GETEVENTMSG 0x4201
+#define PTRACE_GETSIGINFO 0x4202
+#define PTRACE_SETSIGINFO 0x4203
+
+/* options set using PTRACE_SETOPTIONS */
+#define PTRACE_O_TRACESYSGOOD 0x00000001
+#define PTRACE_O_TRACEFORK 0x00000002
+#define PTRACE_O_TRACEVFORK 0x00000004
+#define PTRACE_O_TRACECLONE 0x00000008
+#define PTRACE_O_TRACEEXEC 0x00000010
+#define PTRACE_O_TRACEVFORKDONE 0x00000020
+#define PTRACE_O_TRACEEXIT 0x00000040
+
+#define PTRACE_O_MASK 0x0000007f
+
+/* Wait extended result codes for the above trace options. */
+#define PTRACE_EVENT_FORK 1
+#define PTRACE_EVENT_VFORK 2
+#define PTRACE_EVENT_CLONE 3
+#define PTRACE_EVENT_EXEC 4
+#define PTRACE_EVENT_VFORK_DONE 5
+#define PTRACE_EVENT_EXIT 6
+
+#include <arch/ptrace.h>
+
+#ifdef __KERNEL__
+/*
+ * Ptrace flags
+ */
+
+#define PT_PTRACED 0x00000001
+#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
+#define PT_TRACESYSGOOD 0x00000004
+#define PT_PTRACE_CAP 0x00000008 /* ptracer can follow suid-exec */
+#define PT_TRACE_FORK 0x00000010
+#define PT_TRACE_VFORK 0x00000020
+#define PT_TRACE_CLONE 0x00000040
+#define PT_TRACE_EXEC 0x00000080
+#define PT_TRACE_VFORK_DONE 0x00000100
+#define PT_TRACE_EXIT 0x00000200
+#define PT_ATTACHED 0x00000400 /* parent != real_parent */
+
+#define PT_TRACE_MASK 0x000003f4
+
+/* single stepping state bits (used on ARM and PA-RISC) */
+#define PT_SINGLESTEP_BIT 31
+#define PT_SINGLESTEP (1<<PT_SINGLESTEP_BIT)
+#define PT_BLOCKSTEP_BIT 30
+#define PT_BLOCKSTEP (1<<PT_BLOCKSTEP_BIT)
+
+#include <lwk/compiler.h> /* For unlikely. */
+#include <lwk/task.h>
+
+
+extern long arch_ptrace(struct task_struct *child, long request, long addr, long data);
+extern struct task_struct *ptrace_get_task(pid_t pid);
+extern int ptrace_traceme(void);
+extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
+extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
+extern int ptrace_attach(struct task_struct *tsk);
+extern int ptrace_detach(struct task_struct *, unsigned int);
+extern void __ptrace_detach(struct task_struct *, unsigned int);
+extern void ptrace_disable(struct task_struct *);
+extern int ptrace_check_attach(struct task_struct *task, int kill);
+extern int ptrace_request(struct task_struct *child, long request, long addr, long data);
+extern void ptrace_notify(int exit_code);
+extern void __ptrace_link(struct task_struct *child,
+ struct task_struct *new_parent);
+extern void __ptrace_unlink(struct task_struct *child);
+extern void ptrace_untrace(struct task_struct *child);
+extern int ptrace_may_attach(struct task_struct *task);
+
+static inline void ptrace_link(struct task_struct *child,
+ struct task_struct *new_parent)
+{
+ if (unlikely(child->ptrace))
+ __ptrace_link(child, new_parent);
+}
+static inline void ptrace_unlink(struct task_struct *child)
+{
+ if (unlikely(child->ptrace))
+ __ptrace_unlink(child);
+}
+
+
+#ifndef force_successful_syscall_return
+/*
+ * System call handlers that, upon successful completion, need to return a
+ * negative value should call force_successful_syscall_return() right before
+ * returning. On architectures where the syscall convention provides for a
+ * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
+ * others), this macro can be used to ensure that the error flag will not get
+ * set. On architectures which do not support a separate error flag, the macro
+ * is a no-op and the spurious error condition needs to be filtered out by some
+ * other means (e.g., in user-level, by passing an extra argument to the
+ * syscall handler, or something along those lines).
+ */
+#define force_successful_syscall_return() do { } while (0)
+#endif
+
+#endif
+
+#endif
--- /dev/null
+/**
+ * Derived from Linux include/linux/ioport.h.
+ * Original comment at head of ioport.h:
+ *
+ * ioport.h Definitions of routines for detecting, reserving and
+ * allocating system resources.
+ *
+ * Authors: Linus Torvalds
+ */
+
+#ifndef _LWK_RESOURCE_H
+#define _LWK_RESOURCE_H
+
+/**
+ * Resources are tree-like, allowing
+ * nesting etc..
+ */
+struct resource {
+ const char *name;
+ unsigned long start, end;
+ unsigned long flags;
+ struct resource *parent, *sibling, *child;
+};
+
+/**
+ * PC/ISA/whatever - the normal PC address spaces: IO and memory
+ */
+extern struct resource ioport_resource;
+extern struct resource iomem_resource;
+
+/**
+ * IO resources have these defined flags.
+ */
+#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */
+
+#define IORESOURCE_IO 0x00000100 /* Resource type */
+#define IORESOURCE_MEM 0x00000200
+#define IORESOURCE_IRQ 0x00000400
+#define IORESOURCE_DMA 0x00000800
+
+#define IORESOURCE_PREFETCH 0x00001000 /* No side effects */
+#define IORESOURCE_READONLY 0x00002000
+#define IORESOURCE_CACHEABLE 0x00004000
+#define IORESOURCE_RANGELENGTH 0x00008000
+#define IORESOURCE_SHADOWABLE 0x00010000
+#define IORESOURCE_BUS_HAS_VGA 0x00080000
+
+#define IORESOURCE_DISABLED 0x10000000
+#define IORESOURCE_UNSET 0x20000000
+#define IORESOURCE_AUTO 0x40000000
+#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
+
+/* ISA PnP IRQ specific bits (IORESOURCE_BITS) */
+#define IORESOURCE_IRQ_HIGHEDGE (1<<0)
+#define IORESOURCE_IRQ_LOWEDGE (1<<1)
+#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
+#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
+
+/* ISA PnP DMA specific bits (IORESOURCE_BITS) */
+#define IORESOURCE_DMA_TYPE_MASK (3<<0)
+#define IORESOURCE_DMA_8BIT (0<<0)
+#define IORESOURCE_DMA_8AND16BIT (1<<0)
+#define IORESOURCE_DMA_16BIT (2<<0)
+
+#define IORESOURCE_DMA_MASTER (1<<2)
+#define IORESOURCE_DMA_BYTE (1<<3)
+#define IORESOURCE_DMA_WORD (1<<4)
+
+#define IORESOURCE_DMA_SPEED_MASK (3<<6)
+#define IORESOURCE_DMA_COMPATIBLE (0<<6)
+#define IORESOURCE_DMA_TYPEA (1<<6)
+#define IORESOURCE_DMA_TYPEB (2<<6)
+#define IORESOURCE_DMA_TYPEF (3<<6)
+
+/* ISA PnP memory I/O specific bits (IORESOURCE_BITS) */
+#define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */
+#define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */
+#define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */
+#define IORESOURCE_MEM_TYPE_MASK (3<<3)
+#define IORESOURCE_MEM_8BIT (0<<3)
+#define IORESOURCE_MEM_16BIT (1<<3)
+#define IORESOURCE_MEM_8AND16BIT (2<<3)
+#define IORESOURCE_MEM_32BIT (3<<3)
+#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */
+#define IORESOURCE_MEM_EXPANSIONROM (1<<6)
+
+/* PCI ROM control bits (IORESOURCE_BITS) */
+#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
+#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */
+#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */
+
+extern int request_resource(struct resource *root, struct resource *new);
+extern struct resource * ____request_resource(struct resource *root, struct resource *new);
+extern int release_resource(struct resource *new);
+extern int insert_resource(struct resource *parent, struct resource *new);
+extern int allocate_resource(struct resource *root, struct resource *new,
+ unsigned long size,
+ unsigned long min, unsigned long max,
+ unsigned long align,
+ void (*alignf)(void *, struct resource *,
+ unsigned long, unsigned long),
+ void *alignf_data);
+extern int adjust_resource(struct resource *res, unsigned long start,
+ unsigned long size);
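+
+/*
+ * Usage sketch (illustrative): a driver typically describes the region it
+ * owns and claims it under the matching root resource.  The addresses
+ * below are placeholders:
+ *
+ *	static struct resource uart_res = {
+ *		.name  = "uart0",
+ *		.start = 0x3f8,
+ *		.end   = 0x3ff,
+ *		.flags = IORESOURCE_IO | IORESOURCE_BUSY,
+ *	};
+ *
+ *	if (request_resource(&ioport_resource, &uart_res) != 0)
+ *		return -EBUSY;	(the region is already claimed)
+ */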
+
+#endif
--- /dev/null
+#ifndef _LWK_SCHED_H
+#define _LWK_SCHED_H
+
+#include <lwk/task.h>
+#include <lwk/init.h>
+
+extern int __init sched_subsys_init(void);
+extern void sched_add_task(struct task_struct *task);
+extern void sched_del_task(struct task_struct *task);
+extern int sched_wakeup_task(struct task_struct *task,
+ taskstate_t valid_states);
+extern void schedule(void);
+
+extern struct task_struct *arch_context_switch(struct task_struct *prev,
+ struct task_struct *next);
+extern void arch_idle_task_loop_body(void);
+
+#endif
--- /dev/null
+#ifndef _SCREEN_INFO_H
+#define _SCREEN_INFO_H
+
+#include <lwk/types.h>
+
+/*
+ * These are set up by the setup-routine at boot-time:
+ */
+
+struct screen_info {
+ u8 orig_x; /* 0x00 */
+ u8 orig_y; /* 0x01 */
+ u16 dontuse1; /* 0x02 -- EXT_MEM_K sits here */
+ u16 orig_video_page; /* 0x04 */
+ u8 orig_video_mode; /* 0x06 */
+ u8 orig_video_cols; /* 0x07 */
+ u16 unused2; /* 0x08 */
+ u16 orig_video_ega_bx; /* 0x0a */
+ u16 unused3; /* 0x0c */
+ u8 orig_video_lines; /* 0x0e */
+ u8 orig_video_isVGA; /* 0x0f */
+ u16 orig_video_points; /* 0x10 */
+
+ /* VESA graphic mode -- linear frame buffer */
+ u16 lfb_width; /* 0x12 */
+ u16 lfb_height; /* 0x14 */
+ u16 lfb_depth; /* 0x16 */
+ u32 lfb_base; /* 0x18 */
+ u32 lfb_size; /* 0x1c */
+ u16 dontuse2, dontuse3; /* 0x20 -- CL_MAGIC and CL_OFFSET here */
+ u16 lfb_linelength; /* 0x24 */
+ u8 red_size; /* 0x26 */
+ u8 red_pos; /* 0x27 */
+ u8 green_size; /* 0x28 */
+ u8 green_pos; /* 0x29 */
+ u8 blue_size; /* 0x2a */
+ u8 blue_pos; /* 0x2b */
+ u8 rsvd_size; /* 0x2c */
+ u8 rsvd_pos; /* 0x2d */
+ u16 vesapm_seg; /* 0x2e */
+ u16 vesapm_off; /* 0x30 */
+ u16 pages; /* 0x32 */
+ u16 vesa_attributes; /* 0x34 */
+ u32 capabilities; /* 0x36 */
+ /* 0x3a -- 0x3f reserved for future expansion */
+};
+
+extern struct screen_info screen_info;
+
+#define ORIG_X (screen_info.orig_x)
+#define ORIG_Y (screen_info.orig_y)
+#define ORIG_VIDEO_MODE (screen_info.orig_video_mode)
+#define ORIG_VIDEO_COLS (screen_info.orig_video_cols)
+#define ORIG_VIDEO_EGA_BX (screen_info.orig_video_ega_bx)
+#define ORIG_VIDEO_LINES (screen_info.orig_video_lines)
+#define ORIG_VIDEO_ISVGA (screen_info.orig_video_isVGA)
+#define ORIG_VIDEO_POINTS (screen_info.orig_video_points)
+
+#define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
+#define VIDEO_TYPE_CGA 0x11 /* CGA Display */
+#define VIDEO_TYPE_EGAM 0x20 /* EGA/VGA in Monochrome Mode */
+#define VIDEO_TYPE_EGAC 0x21 /* EGA in Color Mode */
+#define VIDEO_TYPE_VGAC 0x22 /* VGA+ in Color Mode */
+#define VIDEO_TYPE_VLFB 0x23 /* VESA VGA in graphic mode */
+
+#define VIDEO_TYPE_PICA_S3 0x30 /* ACER PICA-61 local S3 video */
+#define VIDEO_TYPE_MIPS_G364 0x31 /* MIPS Magnum 4000 G364 video */
+#define VIDEO_TYPE_SGI 0x33 /* Various SGI graphics hardware */
+
+#define VIDEO_TYPE_TGAC 0x40 /* DEC TGA */
+
+#define VIDEO_TYPE_SUN 0x50 /* Sun frame buffer. */
+#define VIDEO_TYPE_SUNPCI 0x51 /* Sun PCI based frame buffer. */
+
+#define VIDEO_TYPE_PMAC 0x60 /* PowerMacintosh frame buffer. */
+
+#endif /* _SCREEN_INFO_H */
--- /dev/null
+#ifndef _LWK_SEQLOCK_H
+#define _LWK_SEQLOCK_H
+/*
+ * Reader/writer consistent mechanism without starving writers. This type of
+ * lock is for data where the reader wants a consistent set of information
+ * and is willing to retry if the information changes. Readers never
+ * block but they may have to retry if a writer is in
+ * progress. Writers do not wait for readers.
+ *
+ * This is not as cache friendly as brlock. Also, this will not work
+ * for data that contains pointers, because any writer could
+ * invalidate a pointer that a reader was following.
+ *
+ * Expected reader usage:
+ * do {
+ * seq = read_seqbegin(&foo);
+ * ...
+ * } while (read_seqretry(&foo, seq));
+ *
+ *
+ * On non-SMP the spin locks disappear but the writer still needs
+ * to increment the sequence variables because an interrupt routine could
+ * change the state of the data.
+ *
+ * Based on x86_64 vsyscall gettimeofday
+ * by Keith Owens and Andrea Arcangeli
+ */
+
+#include <lwk/spinlock.h>
+
+typedef struct {
+ unsigned sequence;
+ spinlock_t lock;
+} seqlock_t;
+
+/*
+ * These macros triggered gcc-3.x compile-time problems. We think these are
+ * OK now. Be cautious.
+ */
+#define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED }
+#define seqlock_init(x) do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0)
+
+
+/* Lock out other writers and update the count.
+ * Acts like a normal spin_lock/unlock.
+ */
+static inline void write_seqlock(seqlock_t *sl)
+{
+ spin_lock(&sl->lock);
+ ++sl->sequence;
+ smp_wmb();
+}
+
+static inline void write_sequnlock(seqlock_t *sl)
+{
+ smp_wmb();
+ sl->sequence++;
+ spin_unlock(&sl->lock);
+}
+
+static inline int write_tryseqlock(seqlock_t *sl)
+{
+ int ret = spin_trylock(&sl->lock);
+
+ if (ret) {
+ ++sl->sequence;
+ smp_wmb();
+ }
+ return ret;
+}
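+
+/*
+ * Writer usage sketch (illustrative): update shared data so that readers
+ * following the read_seqbegin()/read_seqretry() loop above always see a
+ * consistent snapshot:
+ *
+ *	write_seqlock(&foo);
+ *	shared.sec  = new_sec;
+ *	shared.nsec = new_nsec;
+ *	write_sequnlock(&foo);
+ */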
+
+/* Start of read calculation -- fetch last complete writer token */
+static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
+{
+ unsigned ret = sl->sequence;
+ smp_rmb();
+ return ret;
+}
+
+/* Test if reader processed invalid data.
+ * If the initial value is odd,
+ * then a writer had already started when the section was entered.
+ * If the sequence value changed,
+ * then a writer changed the data while in the section.
+ *
+ * Using xor saves one conditional branch.
+ */
+static __always_inline int read_seqretry(const seqlock_t *sl, unsigned iv)
+{
+ smp_rmb();
+ return (iv & 1) | (sl->sequence ^ iv);
+}
+
+
+/*
+ * Version using sequence counter only.
+ * This can be used when the code has its own mutex protecting the
+ * update, starting before the write_seqcount_begin() and ending
+ * after the write_seqcount_end().
+ */
+
+typedef struct seqcount {
+ unsigned sequence;
+} seqcount_t;
+
+#define SEQCNT_ZERO { 0 }
+#define seqcount_init(x) do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
+
+/* Start of read using pointer to a sequence counter only. */
+static inline unsigned read_seqcount_begin(const seqcount_t *s)
+{
+ unsigned ret = s->sequence;
+ smp_rmb();
+ return ret;
+}
+
+/* Test if reader processed invalid data.
+ * Equivalent to: iv is odd or sequence number has changed.
+ * (iv & 1) || (*s != iv)
+ * Using xor saves one conditional branch.
+ */
+static inline int read_seqcount_retry(const seqcount_t *s, unsigned iv)
+{
+ smp_rmb();
+ return (iv & 1) | (s->sequence ^ iv);
+}
+
+
+/*
+ * Sequence counter only version assumes that callers are using their
+ * own mutexing.
+ */
+static inline void write_seqcount_begin(seqcount_t *s)
+{
+ s->sequence++;
+ smp_wmb();
+}
+
+static inline void write_seqcount_end(seqcount_t *s)
+{
+ smp_wmb();
+ s->sequence++;
+}
+
+/*
+ * Possible sw/hw IRQ protected versions of the interfaces.
+ */
+#define write_seqlock_irqsave(lock, flags) \
+ do { local_irq_save(flags); write_seqlock(lock); } while (0)
+#define write_seqlock_irq(lock) \
+ do { local_irq_disable(); write_seqlock(lock); } while (0)
+#define write_seqlock_bh(lock) \
+ do { local_bh_disable(); write_seqlock(lock); } while (0)
+
+#define write_sequnlock_irqrestore(lock, flags) \
+ do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
+#define write_sequnlock_irq(lock) \
+ do { write_sequnlock(lock); local_irq_enable(); } while(0)
+#define write_sequnlock_bh(lock) \
+ do { write_sequnlock(lock); local_bh_enable(); } while(0)
+
+#define read_seqbegin_irqsave(lock, flags) \
+ ({ local_irq_save(flags); read_seqbegin(lock); })
+
+#define read_seqretry_irqrestore(lock, iv, flags) \
+ ({ \
+ int ret = read_seqretry(lock, iv); \
+ local_irq_restore(flags); \
+ ret; \
+ })
+
+#endif /* _LWK_SEQLOCK_H */
--- /dev/null
+#ifndef _LWK_SHOW_H
+#define _LWK_SHOW_H
+
+#include <arch/show.h>
+
+extern void show_memory(unsigned long vaddr, size_t n);
+
+#endif
--- /dev/null
+#ifndef _LWK_SIGNAL_H
+#define _LWK_SIGNAL_H
+
+#include <arch/signal.h>
+#include <arch/siginfo.h>
+
+#ifdef __KERNEL__
+#include <lwk/list.h>
+#include <lwk/spinlock.h>
+
+/*
+ * Real Time signals may be queued.
+ */
+
+struct sigqueue {
+ struct list_head list;
+ int flags;
+ siginfo_t info;
+ struct user_struct *user;
+};
+
+/* flags values. */
+#define SIGQUEUE_PREALLOC 1
+
+struct sigpending {
+ struct list_head list;
+ sigset_t signal;
+};
+
+/*
+ * Define some primitives to manipulate sigset_t.
+ */
+
+#ifndef __HAVE_ARCH_SIG_BITOPS
+#include <lwk/bitops.h>
+
+/* We don't use <lwk/bitops.h> for these because there is no need to
+ be atomic. */
+static inline void sigaddset(sigset_t *set, int _sig)
+{
+ unsigned long sig = _sig - 1;
+ if (_NSIG_WORDS == 1)
+ set->sig[0] |= 1UL << sig;
+ else
+ set->sig[sig / _NSIG_BPW] |= 1UL << (sig % _NSIG_BPW);
+}
+
+static inline void sigdelset(sigset_t *set, int _sig)
+{
+ unsigned long sig = _sig - 1;
+ if (_NSIG_WORDS == 1)
+ set->sig[0] &= ~(1UL << sig);
+ else
+ set->sig[sig / _NSIG_BPW] &= ~(1UL << (sig % _NSIG_BPW));
+}
+
+static inline int sigismember(sigset_t *set, int _sig)
+{
+ unsigned long sig = _sig - 1;
+ if (_NSIG_WORDS == 1)
+ return 1 & (set->sig[0] >> sig);
+ else
+ return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW));
+}
+
+static inline int sigfindinword(unsigned long word)
+{
+ return ffz(~word);
+}
+
+#endif /* __HAVE_ARCH_SIG_BITOPS */
+
+static inline int sigisemptyset(sigset_t *set)
+{
+ extern void _NSIG_WORDS_is_unsupported_size(void);
+ switch (_NSIG_WORDS) {
+ case 4:
+ return (set->sig[3] | set->sig[2] |
+ set->sig[1] | set->sig[0]) == 0;
+ case 2:
+ return (set->sig[1] | set->sig[0]) == 0;
+ case 1:
+ return set->sig[0] == 0;
+ default:
+ _NSIG_WORDS_is_unsupported_size();
+ return 0;
+ }
+}
+
+#define sigmask(sig) (1UL << ((sig) - 1))
+
+#ifndef __HAVE_ARCH_SIG_SETOPS
+#include <lwk/string.h>
+
+#define _SIG_SET_BINOP(name, op) \
+static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \
+{ \
+ extern void _NSIG_WORDS_is_unsupported_size(void); \
+ unsigned long a0, a1, a2, a3, b0, b1, b2, b3; \
+ \
+ switch (_NSIG_WORDS) { \
+ case 4: \
+ a3 = a->sig[3]; a2 = a->sig[2]; \
+ b3 = b->sig[3]; b2 = b->sig[2]; \
+ r->sig[3] = op(a3, b3); \
+ r->sig[2] = op(a2, b2); \
+ case 2: \
+ a1 = a->sig[1]; b1 = b->sig[1]; \
+ r->sig[1] = op(a1, b1); \
+ case 1: \
+ a0 = a->sig[0]; b0 = b->sig[0]; \
+ r->sig[0] = op(a0, b0); \
+ break; \
+ default: \
+ _NSIG_WORDS_is_unsupported_size(); \
+ } \
+}
+
+#define _sig_or(x,y) ((x) | (y))
+_SIG_SET_BINOP(sigorsets, _sig_or)
+
+#define _sig_and(x,y) ((x) & (y))
+_SIG_SET_BINOP(sigandsets, _sig_and)
+
+#define _sig_nand(x,y) ((x) & ~(y))
+_SIG_SET_BINOP(signandsets, _sig_nand)
+
+#undef _SIG_SET_BINOP
+#undef _sig_or
+#undef _sig_and
+#undef _sig_nand
+
+#define _SIG_SET_OP(name, op) \
+static inline void name(sigset_t *set) \
+{ \
+ extern void _NSIG_WORDS_is_unsupported_size(void); \
+ \
+ switch (_NSIG_WORDS) { \
+ case 4: set->sig[3] = op(set->sig[3]); \
+ set->sig[2] = op(set->sig[2]); \
+ case 2: set->sig[1] = op(set->sig[1]); \
+ case 1: set->sig[0] = op(set->sig[0]); \
+ break; \
+ default: \
+ _NSIG_WORDS_is_unsupported_size(); \
+ } \
+}
+
+#define _sig_not(x) (~(x))
+_SIG_SET_OP(signotset, _sig_not)
+
+#undef _SIG_SET_OP
+#undef _sig_not
+
+static inline void sigemptyset(sigset_t *set)
+{
+ switch (_NSIG_WORDS) {
+ default:
+ memset(set, 0, sizeof(sigset_t));
+ break;
+ case 2: set->sig[1] = 0;
+ case 1: set->sig[0] = 0;
+ break;
+ }
+}
+
+static inline void sigfillset(sigset_t *set)
+{
+ switch (_NSIG_WORDS) {
+ default:
+ memset(set, -1, sizeof(sigset_t));
+ break;
+ case 2: set->sig[1] = -1;
+ case 1: set->sig[0] = -1;
+ break;
+ }
+}
+
+/* Some extensions for manipulating the low 32 signals in particular. */
+
+static inline void sigaddsetmask(sigset_t *set, unsigned long mask)
+{
+ set->sig[0] |= mask;
+}
+
+static inline void sigdelsetmask(sigset_t *set, unsigned long mask)
+{
+ set->sig[0] &= ~mask;
+}
+
+static inline int sigtestsetmask(sigset_t *set, unsigned long mask)
+{
+ return (set->sig[0] & mask) != 0;
+}
+
+static inline void siginitset(sigset_t *set, unsigned long mask)
+{
+ set->sig[0] = mask;
+ switch (_NSIG_WORDS) {
+ default:
+ memset(&set->sig[1], 0, sizeof(long)*(_NSIG_WORDS-1));
+ break;
+ case 2: set->sig[1] = 0;
+ case 1: ;
+ }
+}
+
+static inline void siginitsetinv(sigset_t *set, unsigned long mask)
+{
+ set->sig[0] = ~mask;
+ switch (_NSIG_WORDS) {
+ default:
+ memset(&set->sig[1], -1, sizeof(long)*(_NSIG_WORDS-1));
+ break;
+ case 2: set->sig[1] = -1;
+ case 1: ;
+ }
+}
+
+#endif /* __HAVE_ARCH_SIG_SETOPS */
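+
+/*
+ * Usage sketch (illustrative): build a mask containing SIGINT and SIGTERM
+ * using the primitives above and test membership:
+ *
+ *	sigset_t blocked;
+ *
+ *	sigemptyset(&blocked);
+ *	sigaddset(&blocked, SIGINT);
+ *	sigaddset(&blocked, SIGTERM);
+ *	if (sigismember(&blocked, SIGINT))
+ *		...
+ */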
+
+static inline void init_sigpending(struct sigpending *sig)
+{
+ sigemptyset(&sig->signal);
+ INIT_LIST_HEAD(&sig->list);
+}
+
+extern void flush_sigqueue(struct sigpending *queue);
+
+/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
+static inline int valid_signal(unsigned long sig)
+{
+ return sig <= _NSIG ? 1 : 0;
+}
+
+extern int next_signal(struct sigpending *pending, sigset_t *mask);
+extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p);
+extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *);
+extern long do_sigpending(void __user *, unsigned long);
+extern int sigprocmask(int, sigset_t *, sigset_t *);
+
+struct pt_regs;
+extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie);
+
+extern struct kmem_cache *sighand_cachep;
+
+/*
+ * In POSIX a signal is sent either to a specific thread (LWK task)
+ * or to the process as a whole (LWK thread group). How the signal
+ * is sent determines whether it's to one thread or the whole group,
+ * which determines which signal mask(s) are involved in blocking it
+ * from being delivered until later. When the signal is delivered,
+ * either it's caught or ignored by a user handler or it has a default
+ * effect that applies to the whole thread group (POSIX process).
+ *
+ * The possible effects an unblocked signal set to SIG_DFL can have are:
+ * ignore - Nothing Happens
+ * terminate - kill the process, i.e. all threads in the group,
+ * similar to exit_group. The group leader (only) reports
+ * WIFSIGNALED status to its parent.
+ * coredump - write a core dump file describing all threads using
+ * the same mm and then kill all those threads
+ * stop - stop all the threads in the group, i.e. TASK_STOPPED state
+ *
+ * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
+ * Other signals, when not blocked and set to SIG_DFL, behave as follows.
+ * The job control signals also have other special effects.
+ *
+ * +--------------------+------------------+
+ * | POSIX signal | default action |
+ * +--------------------+------------------+
+ * | SIGHUP | terminate |
+ * | SIGINT | terminate |
+ * | SIGQUIT | coredump |
+ * | SIGILL | coredump |
+ * | SIGTRAP | coredump |
+ * | SIGABRT/SIGIOT | coredump |
+ * | SIGBUS | coredump |
+ * | SIGFPE | coredump |
+ * | SIGKILL | terminate(+) |
+ * | SIGUSR1 | terminate |
+ * | SIGSEGV | coredump |
+ * | SIGUSR2 | terminate |
+ * | SIGPIPE | terminate |
+ * | SIGALRM | terminate |
+ * | SIGTERM | terminate |
+ * | SIGCHLD | ignore |
+ * | SIGCONT | ignore(*) |
+ * | SIGSTOP | stop(*)(+) |
+ * | SIGTSTP | stop(*) |
+ * | SIGTTIN | stop(*) |
+ * | SIGTTOU | stop(*) |
+ * | SIGURG | ignore |
+ * | SIGXCPU | coredump |
+ * | SIGXFSZ | coredump |
+ * | SIGVTALRM | terminate |
+ * | SIGPROF | terminate |
+ * | SIGPOLL/SIGIO | terminate |
+ * | SIGSYS/SIGUNUSED | coredump |
+ * | SIGSTKFLT | terminate |
+ * | SIGWINCH | ignore |
+ * | SIGPWR | terminate |
+ * | SIGRTMIN-SIGRTMAX | terminate |
+ * +--------------------+------------------+
+ * | non-POSIX signal | default action |
+ * +--------------------+------------------+
+ * | SIGEMT | coredump |
+ * +--------------------+------------------+
+ *
+ * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
+ * (*) Special job control effects:
+ * When SIGCONT is sent, it resumes the process (all threads in the group)
+ * from TASK_STOPPED state and also clears any pending/queued stop signals
+ * (any of those marked with "stop(*)"). This happens regardless of blocking,
+ * catching, or ignoring SIGCONT. When any stop signal is sent, it clears
+ * any pending/queued SIGCONT signals; this happens regardless of blocking,
+ * catching, or ignoring the stop signal, though (except for SIGSTOP) the
+ * default action of stopping the process may happen later or never.
+ */
+
+#ifdef SIGEMT
+#define SIGEMT_MASK rt_sigmask(SIGEMT)
+#else
+#define SIGEMT_MASK 0
+#endif
+
+#if SIGRTMIN > BITS_PER_LONG
+#define rt_sigmask(sig) (1ULL << ((sig)-1))
+#else
+#define rt_sigmask(sig) sigmask(sig)
+#endif
+#define siginmask(sig, mask) (rt_sigmask(sig) & (mask))
+
+#define SIG_KERNEL_ONLY_MASK (\
+ rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP))
+
+#define SIG_KERNEL_STOP_MASK (\
+ rt_sigmask(SIGSTOP) | rt_sigmask(SIGTSTP) | \
+ rt_sigmask(SIGTTIN) | rt_sigmask(SIGTTOU) )
+
+#define SIG_KERNEL_COREDUMP_MASK (\
+ rt_sigmask(SIGQUIT) | rt_sigmask(SIGILL) | \
+ rt_sigmask(SIGTRAP) | rt_sigmask(SIGABRT) | \
+ rt_sigmask(SIGFPE) | rt_sigmask(SIGSEGV) | \
+ rt_sigmask(SIGBUS) | rt_sigmask(SIGSYS) | \
+ rt_sigmask(SIGXCPU) | rt_sigmask(SIGXFSZ) | \
+ SIGEMT_MASK )
+
+#define SIG_KERNEL_IGNORE_MASK (\
+ rt_sigmask(SIGCONT) | rt_sigmask(SIGCHLD) | \
+ rt_sigmask(SIGWINCH) | rt_sigmask(SIGURG) )
+
+#define sig_kernel_only(sig) \
+ (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_ONLY_MASK))
+#define sig_kernel_coredump(sig) \
+ (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_COREDUMP_MASK))
+#define sig_kernel_ignore(sig) \
+ (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_IGNORE_MASK))
+#define sig_kernel_stop(sig) \
+ (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_STOP_MASK))
+
+#define sig_needs_tasklist(sig) ((sig) == SIGCONT)
+
+#define sig_user_defined(t, signr) \
+ (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
+ ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
+
+#define sig_fatal(t, signr) \
+ (!siginmask(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
+ (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
+
+#endif /* __KERNEL__ */
+
+#endif /* _LWK_SIGNAL_H */
--- /dev/null
+#ifndef _LWK_SMP_H
+#define _LWK_SMP_H
+
+/*
+ * Generic SMP support
+ * Alan Cox. <alan@redhat.com>
+ */
+
+#include <lwk/kernel.h>
+#include <lwk/compiler.h>
+#include <arch/smp.h>
+
+/*
+ * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
+ * (defined in asm header):
+ */
+
+/*
+ * stops all CPUs but the current one:
+ */
+extern void smp_send_stop(void);
+
+/*
+ * sends a 'reschedule' event to another CPU:
+ */
+extern void smp_send_reschedule(int cpu);
+
+
+/*
+ * Prepare machine for booting other CPUs.
+ */
+extern void smp_prepare_cpus(unsigned int max_cpus);
+
+/*
+ * Bring a CPU up
+ */
+extern int __cpu_up(unsigned int cpunum);
+
+/*
+ * Final polishing of CPUs
+ */
+extern void smp_cpus_done(unsigned int max_cpus);
+
+/*
+ * Call a function on all other processors
+ */
+int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
+
+/*
+ * Call a function on all processors
+ */
+int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait);
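+
+/*
+ * Usage sketch (illustrative): run a function once on every CPU and wait
+ * for all of them to finish before continuing:
+ *
+ *	static void drain_local_state(void *unused)
+ *	{
+ *		...per-CPU work...
+ *	}
+ *
+ *	on_each_cpu(drain_local_state, NULL, 0, 1);
+ */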
+
+#define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */
+#define MSG_ALL 0x8001
+
+#define MSG_INVALIDATE_TLB 0x0001 /* Remote processor TLB invalidate */
+#define MSG_STOP_CPU 0x0002 /* Sent to shut down slave CPU's
+ * when rebooting
+ */
+#define MSG_RESCHEDULE 0x0003 /* Reschedule request from master CPU*/
+#define MSG_CALL_FUNCTION 0x0004 /* Call function on all other CPUs */
+
+/*
+ * Mark the boot cpu "online" so that it can call console drivers in
+ * printk() and can access its per-cpu storage.
+ */
+void smp_prepare_boot_cpu(void);
+
+#define smp_processor_id() raw_smp_processor_id()
+#define get_cpu() smp_processor_id()
+#define put_cpu() do { } while (0)
+#define put_cpu_no_resched() do { } while (0)
+
+/**
+ * Returns the current CPU's logical ID.
+ */
+#define this_cpu smp_processor_id()
+
+void __init arch_boot_cpu(unsigned int cpu);
+void __init cpu_init(void);
+
+#endif /* _LWK_SMP_H */
--- /dev/null
+#ifndef _LWK_SORT_H
+#define _LWK_SORT_H
+
+#include <lwk/types.h>
+
+void sort(void *base, size_t num, size_t size,
+ int (*cmp)(const void *, const void *),
+ void (*swap)(void *, void *, int));
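+
+/*
+ * Usage sketch (illustrative, assuming a NULL swap selects a built-in
+ * byte-wise swap as in the Linux lib/sort.c this interface mirrors):
+ *
+ *	static int cmp_int(const void *a, const void *b)
+ *	{
+ *		return *(const int *)a - *(const int *)b;
+ *	}
+ *
+ *	sort(values, nvalues, sizeof(int), cmp_int, NULL);
+ */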
+
+#endif
--- /dev/null
+#ifndef _LWK_SPINLOCK_H
+#define _LWK_SPINLOCK_H
+
+/*
+ * include/lwk/spinlock.h - generic spinlock/rwlock declarations
+ *
+ * here's the role of the various spinlock/rwlock related include files:
+ *
+ * arch/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ * initializers
+ *
+ * lwk/spinlock_types.h:
+ * defines the generic type and initializers
+ *
+ * arch/spinlock.h: contains the __raw_spin_*()/etc. lowlevel
+ * implementations, mostly inline assembly code
+ *
+ * lwk/spinlock_api_smp.h:
+ * contains the prototypes for the _spin_*() APIs.
+ *
+ * lwk/spinlock.h: builds the final spin_*() APIs.
+ */
+
+#include <lwk/kernel.h>
+#include <lwk/linkage.h>
+#include <lwk/compiler.h>
+#include <lwk/stringify.h>
+#include <arch/system.h>
+
+/*
+ * Must define these before including other files, inline functions need them
+ */
+#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME
+
+#define LOCK_SECTION_START(extra) \
+ ".subsection 1\n\t" \
+ extra \
+ ".ifndef " LOCK_SECTION_NAME "\n\t" \
+ LOCK_SECTION_NAME ":\n\t" \
+ ".endif\n"
+
+#define LOCK_SECTION_END \
+ ".previous\n\t"
+
+#define __lockfunc fastcall __attribute__((section(".spinlock.text")))
+
+/*
+ * Pull the raw_spinlock_t and raw_rwlock_t definitions:
+ */
+#include <lwk/spinlock_types.h>
+
+extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
+
+/*
+ * Pull the __raw*() functions/declarations:
+ */
+# include <arch/spinlock.h>
+
+#define spin_lock_init(lock) do { *(lock) = (spinlock_t)SPIN_LOCK_UNLOCKED; } while (0)
+#define rwlock_init(lock) do { *(lock) = (rwlock_t)RW_LOCK_UNLOCKED; } while (0)
+
+#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
+
+/**
+ * spin_unlock_wait - wait until the spinlock gets unlocked
+ * @lock: the spinlock in question.
+ */
+#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
+
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#include <lwk/spinlock_api_smp.h>
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void _raw_spin_lock(spinlock_t *lock);
+#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+ extern int _raw_spin_trylock(spinlock_t *lock);
+ extern void _raw_spin_unlock(spinlock_t *lock);
+
+ extern void _raw_read_lock(rwlock_t *lock);
+ extern int _raw_read_trylock(rwlock_t *lock);
+ extern void _raw_read_unlock(rwlock_t *lock);
+ extern void _raw_write_lock(rwlock_t *lock);
+ extern int _raw_write_trylock(rwlock_t *lock);
+ extern void _raw_write_unlock(rwlock_t *lock);
+#else
+# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
+# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
+# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock)
+# define _raw_spin_lock_flags(lock, flags) \
+ __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
+# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
+# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
+# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
+# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
+# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
+#endif
+
+#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock)
+
+/*
+ * Define the various spin_lock and rw_lock methods.
+ */
+#define spin_trylock(lock) __cond_lock(_spin_trylock(lock))
+#define read_trylock(lock) __cond_lock(_read_trylock(lock))
+#define write_trylock(lock) __cond_lock(_write_trylock(lock))
+
+#define spin_lock(lock) _spin_lock(lock)
+#define write_lock(lock) _write_lock(lock)
+#define read_lock(lock) _read_lock(lock)
+
+#define spin_lock_irqsave(lock, flags) flags = _spin_lock_irqsave(lock)
+#define read_lock_irqsave(lock, flags) flags = _read_lock_irqsave(lock)
+#define write_lock_irqsave(lock, flags) flags = _write_lock_irqsave(lock)
+
+#define spin_lock_irq(lock) _spin_lock_irq(lock)
+#define spin_lock_bh(lock) _spin_lock_bh(lock)
+
+#define read_lock_irq(lock) _read_lock_irq(lock)
+#define read_lock_bh(lock) _read_lock_bh(lock)
+
+#define write_lock_irq(lock) _write_lock_irq(lock)
+#define write_lock_bh(lock) _write_lock_bh(lock)
+
+/*
+ * We inline the unlock functions in the nondebug case:
+ */
+#if defined(CONFIG_DEBUG_SPINLOCK)
+# define spin_unlock(lock) _spin_unlock(lock)
+# define read_unlock(lock) _read_unlock(lock)
+# define write_unlock(lock) _write_unlock(lock)
+#else
+# define spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
+# define read_unlock(lock) __raw_read_unlock(&(lock)->raw_lock)
+# define write_unlock(lock) __raw_write_unlock(&(lock)->raw_lock)
+#endif
+
+#if defined(CONFIG_DEBUG_SPINLOCK)
+# define spin_unlock_irq(lock) _spin_unlock_irq(lock)
+# define read_unlock_irq(lock) _read_unlock_irq(lock)
+# define write_unlock_irq(lock) _write_unlock_irq(lock)
+#else
+# define spin_unlock_irq(lock) \
+ do { __raw_spin_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0)
+# define read_unlock_irq(lock) \
+ do { __raw_read_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0)
+# define write_unlock_irq(lock) \
+ do { __raw_write_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0)
+#endif
+
+#define spin_unlock_irqrestore(lock, flags) \
+ _spin_unlock_irqrestore(lock, flags)
+#define spin_unlock_bh(lock) _spin_unlock_bh(lock)
+
+#define read_unlock_irqrestore(lock, flags) \
+ _read_unlock_irqrestore(lock, flags)
+#define read_unlock_bh(lock) _read_unlock_bh(lock)
+
+#define write_unlock_irqrestore(lock, flags) \
+ _write_unlock_irqrestore(lock, flags)
+#define write_unlock_bh(lock) _write_unlock_bh(lock)
+
+#define spin_trylock_bh(lock) __cond_lock(_spin_trylock_bh(lock))
+
+#define spin_trylock_irq(lock) \
+({ \
+ local_irq_disable(); \
+ _spin_trylock(lock) ? \
+ 1 : ({ local_irq_enable(); 0; }); \
+})
+
+#define spin_trylock_irqsave(lock, flags) \
+({ \
+ local_irq_save(flags); \
+ _spin_trylock(lock) ? \
+ 1 : ({ local_irq_restore(flags); 0; }); \
+})
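+
+/*
+ * Usage sketch (illustrative): protect data that is also touched from
+ * interrupt context; flags must be an unsigned long in the caller's scope:
+ *
+ *	unsigned long flags;
+ *
+ *	spin_lock_irqsave(&my_lock, flags);
+ *	...update the shared data...
+ *	spin_unlock_irqrestore(&my_lock, flags);
+ */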
+
+/*
+ * Pull the atomic_t declaration:
+ * (asm-mips/atomic.h needs above definitions)
+ */
+#include <arch/atomic.h>
+/**
+ * atomic_dec_and_lock - lock on reaching reference count zero
+ * @atomic: the atomic counter
+ * @lock: the spinlock in question
+ */
+extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
+#define atomic_dec_and_lock(atomic, lock) \
+ __cond_lock(_atomic_dec_and_lock(atomic, lock))
+
+/**
+ * spin_can_lock - would spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define spin_can_lock(lock) (!spin_is_locked(lock))
+
+#endif /* _LWK_SPINLOCK_H */
--- /dev/null
+#ifndef _LWK_SPINLOCK_API_SMP_H
+#define _LWK_SPINLOCK_API_SMP_H
+
+#ifndef _LWK_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/lwk/spinlock_api_smp.h
+ *
+ * spinlock API declarations on SMP (and debug)
+ * (implemented in kernel/spinlock.c)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+int in_lock_functions(unsigned long addr);
+
+#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x))
+
+void __lockfunc _spin_lock(spinlock_t *lock) __acquires(spinlock_t);
+void __lockfunc _read_lock(rwlock_t *lock) __acquires(rwlock_t);
+void __lockfunc _write_lock(rwlock_t *lock) __acquires(rwlock_t);
+void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(spinlock_t);
+void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(rwlock_t);
+void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(rwlock_t);
+void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(spinlock_t);
+void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(rwlock_t);
+void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(rwlock_t);
+unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+ __acquires(spinlock_t);
+unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+ __acquires(rwlock_t);
+unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+ __acquires(rwlock_t);
+int __lockfunc _spin_trylock(spinlock_t *lock);
+int __lockfunc _read_trylock(rwlock_t *lock);
+int __lockfunc _write_trylock(rwlock_t *lock);
+int __lockfunc _spin_trylock_bh(spinlock_t *lock);
+void __lockfunc _spin_unlock(spinlock_t *lock) __releases(spinlock_t);
+void __lockfunc _read_unlock(rwlock_t *lock) __releases(rwlock_t);
+void __lockfunc _write_unlock(rwlock_t *lock) __releases(rwlock_t);
+void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(spinlock_t);
+void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(rwlock_t);
+void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(rwlock_t);
+void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(spinlock_t);
+void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(rwlock_t);
+void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(rwlock_t);
+void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+ __releases(spinlock_t);
+void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases(rwlock_t);
+void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases(rwlock_t);
+
+#endif /* _LWK_SPINLOCK_API_SMP_H */
--- /dev/null
+#ifndef _LWK_SPINLOCK_TYPES_H
+#define _LWK_SPINLOCK_TYPES_H
+
+/*
+ * include/lwk/spinlock_types.h - generic spinlock type definitions
+ * and initializers
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#include <arch/spinlock_types.h>
+
+typedef struct {
+ raw_spinlock_t raw_lock;
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+} spinlock_t;
+
+#define SPINLOCK_MAGIC 0xdead4ead
+
+typedef struct {
+ raw_rwlock_t raw_lock;
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC 0xdeaf1eed
+
+#define SPINLOCK_OWNER_INIT ((void *)-1L)
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_LOCK_UNLOCKED \
+ { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+ .magic = SPINLOCK_MAGIC, \
+ .owner = SPINLOCK_OWNER_INIT, \
+ .owner_cpu = -1 }
+#define RW_LOCK_UNLOCKED \
+ { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
+ .magic = RWLOCK_MAGIC, \
+ .owner = SPINLOCK_OWNER_INIT, \
+ .owner_cpu = -1 }
+#else
+# define SPIN_LOCK_UNLOCKED \
+ { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED }
+#define RW_LOCK_UNLOCKED \
+ { .raw_lock = __RAW_RW_LOCK_UNLOCKED }
+#endif
+
+#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
+#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
+
+#endif /* _LWK_SPINLOCK_TYPES_H */
--- /dev/null
+#ifndef _LWK_STAT_H
+#define _LWK_STAT_H
+
+#include <arch/stat.h>
+
+#endif
--- /dev/null
+#ifndef _LWK_STDDEF_H
+#define _LWK_STDDEF_H
+
+#include <lwk/compiler.h>
+
+#undef NULL
+#if defined(__cplusplus)
+#define NULL 0
+#else
+#define NULL ((void *)0)
+#endif
+
+#ifdef __KERNEL__
+#define false 0
+#define true 1
+#endif
+
+#undef offsetof
+#ifdef __compiler_offsetof
+#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER)
+#else
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#endif
+
+#endif
--- /dev/null
+#ifndef _LWK_STRING_H
+#define _LWK_STRING_H
+
+/* We don't want strings.h stuff being used by user stuff by accident */
+
+#ifndef __KERNEL__
+#include <string.h>
+#endif
+
+#ifdef __KERNEL__
+
+#include <lwk/compiler.h> /* for inline */
+#include <lwk/types.h> /* for size_t */
+#include <lwk/stddef.h> /* for NULL */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern char *strndup_user(const char __user *, long);
+
+/*
+ * Include machine specific inline routines
+ */
+#include <arch/string.h>
+
+#ifndef __HAVE_ARCH_STRCPY
+extern char * strcpy(char *,const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNCPY
+extern char * strncpy(char *,const char *, __kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRLCPY
+size_t strlcpy(char *, const char *, size_t);
+#endif
+#ifndef __HAVE_ARCH_STRCAT
+extern char * strcat(char *, const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNCAT
+extern char * strncat(char *, const char *, __kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRLCAT
+extern size_t strlcat(char *, const char *, __kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRCMP
+extern int strcmp(const char *,const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNCMP
+extern int strncmp(const char *,const char *,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRNICMP
+extern int strnicmp(const char *, const char *, __kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRCHR
+extern char * strchr(const char *,int);
+#endif
+#ifndef __HAVE_ARCH_STRNCHR
+extern char * strnchr(const char *, size_t, int);
+#endif
+#ifndef __HAVE_ARCH_STRRCHR
+extern char * strrchr(const char *,int);
+#endif
+#ifndef __HAVE_ARCH_STRSTR
+extern char * strstr(const char *,const char *);
+#endif
+#ifndef __HAVE_ARCH_STRLEN
+extern __kernel_size_t strlen(const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNLEN
+extern __kernel_size_t strnlen(const char *,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRPBRK
+extern char * strpbrk(const char *,const char *);
+#endif
+#ifndef __HAVE_ARCH_STRSEP
+extern char * strsep(char **,const char *);
+#endif
+#ifndef __HAVE_ARCH_STRSPN
+extern __kernel_size_t strspn(const char *,const char *);
+#endif
+#ifndef __HAVE_ARCH_STRCSPN
+extern __kernel_size_t strcspn(const char *,const char *);
+#endif
+
+#ifndef __HAVE_ARCH_MEMSET
+extern void * memset(void *,int,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMCPY
+extern void * memcpy(void *,const void *,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMMOVE
+extern void * memmove(void *,const void *,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMSCAN
+extern void * memscan(void *,int,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMCMP
+extern int memcmp(const void *,const void *,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMCHR
+extern void * memchr(const void *,int,__kernel_size_t);
+#endif
+
+extern char *kstrdup(const char *s, gfp_t gfp);
+
+extern char *strerror(int errnum);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+#endif /* _LWK_STRING_H */
--- /dev/null
+#ifndef __LWK_STRINGIFY_H
+#define __LWK_STRINGIFY_H
+
+/* Indirect stringification. Doing two levels allows the parameter to be a
+ * macro itself. For example, when compiling with -DFOO=bar, __stringify(FOO)
+ * converts to "bar".
+ */
+
+#define __stringify_1(x) #x
+#define __stringify(x) __stringify_1(x)
+
+#endif /* !__LWK_STRINGIFY_H */
--- /dev/null
+#ifndef _LWK_TASK_H
+#define _LWK_TASK_H
+
+#include <lwk/types.h>
+#include <lwk/idspace.h>
+#include <lwk/cpumask.h>
+
+/**
+ * Valid user-space created task IDs are in interval
+ * [TASK_MIN_ID, TASK_MAX_ID].
+ */
+#define TASK_MIN_ID 0
+#define TASK_MAX_ID 4094
+
+/**
+ * The task ID to use for the init_task.
+ * Put it at the top of the space to keep it out of the way.
+ */
+#define INIT_TASK_ID TASK_MAX_ID
+
+/**
+ * Task states
+ */
+#define TASKSTATE_READY (1 << 0)
+#define TASKSTATE_UNINTERRUPTIBLE (1 << 1)
+#define TASKSTATE_INTERRUPTIBLE (1 << 2)
+#define TASKSTATE_EXIT_ZOMBIE (1 << 3)
+typedef unsigned int taskstate_t;
+
+/**
+ * Events that tasks may wait for and be sent.
+ */
+#define LWKEVENT_CHILD_TASK_EXITED (1 << 0)
+#define LWKEVENT_PORTALS_EVENT_POSTED (1 << 1)
+typedef unsigned long event_t;
+
+/**
+ * Initial conditions to use for new task.
+ */
+typedef struct {
+ uid_t uid;
+ uid_t gid;
+ id_t aspace_id;
+ vaddr_t entry_point;
+ vaddr_t stack_ptr;
+ id_t cpu_id;
+ const user_cpumask_t * cpumask;
+} start_state_t;
+
+/**
+ * Core task management API.
+ * These are accessible from both kernel-space and user-space (via syscalls).
+ */
+extern int task_get_myid(id_t *id);
+extern int task_create(id_t id_request, const char *name,
+ const start_state_t *start_state, id_t *id);
+extern int task_exit(int status);
+extern int task_yield(void);
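+
+/*
+ * Usage sketch (illustrative; requested_id, my_aspace_id, entry_vaddr and
+ * stack_vaddr are placeholders):
+ *
+ *	start_state_t state = {
+ *		.uid         = 0,
+ *		.gid         = 0,
+ *		.aspace_id   = my_aspace_id,
+ *		.entry_point = entry_vaddr,
+ *		.stack_ptr   = stack_vaddr,
+ *		.cpu_id      = 0,
+ *		.cpumask     = NULL,
+ *	};
+ *	id_t new_id;
+ *
+ *	task_create(requested_id, "worker", &state, &new_id);
+ */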
+
+#ifdef __KERNEL__
+
+#include <lwk/types.h>
+#include <lwk/init.h>
+#include <lwk/spinlock.h>
+#include <lwk/list.h>
+#include <lwk/seqlock.h>
+#include <lwk/signal.h>
+#include <lwk/idspace.h>
+#include <arch/atomic.h>
+#include <arch/page.h>
+#include <arch/processor.h>
+#include <arch/task.h>
+#include <arch/current.h>
+#include <arch/mmu.h>
+
+/**
+ * Flags for task_struct.flags field.
+ */
+#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
+
+/**
+ * Signal handler structure.
+ */
+struct sighand_struct {
+ atomic_t count;
+ struct k_sigaction action[_NSIG];
+ spinlock_t siglock;
+ struct list_head signalfd_list;
+};
+
+/**
+ * Task structure (aka Process Control Block).
+ * There is one of these for each OS-managed thread of execution in the
+ * system. A task is a generic "context of execution"... it either
+ * represents a user-level process, user-level thread, or kernel thread.
+ */
+struct task_struct {
+ id_t id; /* The task's ID */
+ char name[16]; /* The task's name */
+ struct hlist_node ht_link; /* Task hash table linkage */
+
+ taskstate_t state; /* The task's current state */
+
+ uid_t uid; /* user ID */
+ gid_t gid; /* group ID */
+
+ struct aspace * aspace; /* Address space task is in */
+ struct sighand_struct * sighand; /* signal handler info */
+
+ cpumask_t cpumask; /* CPUs this task may migrate
+ to and create tasks on */
+ id_t cpu_id; /* CPU this task is bound to */
+
+ struct list_head sched_link; /* For per-CPU scheduling lists */
+
+ unsigned long ptrace;
+ uint32_t flags;
+
+ int exit_status; /* Reason the task exited */
+
+ struct arch_task arch; /* arch specific task info */
+};
+
+union task_union {
+ struct task_struct task_info;
+ unsigned long stack[TASK_SIZE/sizeof(long)];
+};
+
+extern union task_union bootstrap_task_union;
+extern struct aspace bootstrap_aspace;
+
+/**
+ * Valid task IDs are in interval [__TASK_MIN_ID, __TASK_MAX_ID].
+ */
+#define __TASK_MIN_ID TASK_MIN_ID
+#define __TASK_MAX_ID (TASK_MAX_ID + 1) /* +1 for IDLE_TASK_ID */
+
+/**
+ * ID of the idle task.
+ */
+#define IDLE_TASK_ID (TASK_MAX_ID + 1)
+
+/**
+ * Checks to see if a task structure is the init task.
+ * The init task is the first user-space task created by the kernel.
+ */
+static inline int
+is_init(struct task_struct *tsk)
+{
+ return (tsk->id == 1);
+}
+
+#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
+#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
+#define clear_used_math() clear_stopped_child_used_math(current)
+#define set_used_math() set_stopped_child_used_math(current)
+#define conditional_stopped_child_used_math(condition, child) \
+ do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
+#define conditional_used_math(condition) \
+ conditional_stopped_child_used_math(condition, current)
+#define copy_to_stopped_child_used_math(child) \
+ do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
+/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
+#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
+#define used_math() tsk_used_math(current)
+
+extern int __init task_subsys_init(void);
+
+extern int arch_task_create(struct task_struct *task,
+ const start_state_t *start_state);
+
+extern int sys_task_get_myid(id_t __user *id);
+extern int sys_task_create(id_t id_request, const char __user *name,
+ const start_state_t __user *start_state,
+ id_t __user *id);
+extern int sys_task_exit(int status);
+extern int sys_task_yield(void);
+
+extern int __task_reserve_id(id_t id);
+extern int __task_create(id_t id, const char *name,
+ const start_state_t *start_state,
+ struct task_struct **task);
+
+#endif
+#endif
--- /dev/null
+#ifndef _LWK_TIME_H
+#define _LWK_TIME_H
+
+#include <lwk/types.h>
+#include <lwk/init.h>
+#include <arch/time.h>
+
+#define NSEC_PER_SEC 1000000000L
+#define NSEC_PER_USEC 1000L
+#define USEC_PER_NSEC 1000L
+
+struct timeval {
+ time_t tv_sec; /* seconds */
+ suseconds_t tv_usec; /* microseconds */
+};
+
+struct timezone {
+ int tz_minuteswest; /* minutes west of Greenwich */
+ int tz_dsttime; /* type of dst correction */
+};
+
+struct timespec {
+ time_t tv_sec; /* seconds */
+ long tv_nsec; /* nanoseconds */
+};
+
+void __init time_init(void);
+void init_cycles2ns(uint32_t khz);
+uint64_t cycles2ns(uint64_t cycles);
+uint64_t get_time(void);
+void set_time(uint64_t ns);
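+
+/*
+ * Worked example (illustrative): with a 2.4 GHz time base,
+ * init_cycles2ns(2400000) calibrates the conversion (the argument is in
+ * kHz); cycles2ns(2400000000ULL) then returns roughly 1000000000 ns,
+ * i.e. one second.
+ */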
+
+#define timespec_is_valid(ts) \
+(((ts)->tv_sec >= 0) && (((unsigned long)(ts)->tv_nsec) < NSEC_PER_SEC))
+
+#endif
--- /dev/null
+#ifndef _LWK_TIMER_H
+#define _LWK_TIMER_H
+
+#include <lwk/idspace.h>
+#include <lwk/list.h>
+
+/**
+ * This structure defines a timer, including when the timer should expire
+ * and the callback function to call when it expires. The callback function
+ * runs in interrupt context with interrupts disabled.
+ */
+struct timer {
+ struct list_head link;
+ id_t cpu; /* CPU this timer is installed on */
+ uint64_t expires; /* Time when this timer expires */
+ uintptr_t data; /* arg to pass to function */
+ void (*function)(uintptr_t); /* executed when timer expires */
+};
+
+/**
+ * Core timer API.
+ */
+void timer_add(struct timer *timer);
+void timer_del(struct timer *timer);
+uint64_t timer_sleep_until(uint64_t when);
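+
+/*
+ * Usage sketch (illustrative, assuming 'expires' uses the same nanosecond
+ * units returned by get_time()):
+ *
+ *	static void my_expiry(uintptr_t data)
+ *	{
+ *		...runs in interrupt context with interrupts disabled...
+ *	}
+ *
+ *	struct timer t = {
+ *		.expires  = get_time() + 1000000,	(fire in 1 ms)
+ *		.function = my_expiry,
+ *		.data     = 0,
+ *	};
+ *	timer_add(&t);
+ */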
+
+/**
+ * Internal timer-subsystem functions.
+ * Normal kernel code and drivers should not call these.
+ */
+int timer_subsys_init(void);
+void expire_timers(void);
+void schedule_next_wakeup(void);
+
+/**
+ * Architecture-dependent timer functions.
+ */
+void arch_schedule_next_wakeup(uint64_t when);
+
+#endif
--- /dev/null
+#ifndef _LWK_TYPES_H
+#define _LWK_TYPES_H
+
+#ifdef __KERNEL__
+
+#define BITS_TO_LONGS(bits) \
+ (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#define DECLARE_BITMAP(name,bits) \
+ unsigned long name[BITS_TO_LONGS(bits)]
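+
+/*
+ * Example (illustrative): DECLARE_BITMAP(mask, 128) expands to
+ * "unsigned long mask[2]" on a 64-bit machine, since
+ * BITS_TO_LONGS(128) == (128 + 64 - 1) / 64 == 2.
+ */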
+
+#define BITS_PER_BYTE 8
+#endif
+
+#include <lwk/posix_types.h>
+#include <arch/types.h>
+
+#ifndef __KERNEL_STRICT_NAMES
+
+typedef __u32 __kernel_dev_t;
+
+typedef __kernel_fd_set fd_set;
+typedef __kernel_dev_t dev_t;
+typedef __kernel_ino_t ino_t;
+typedef __kernel_mode_t mode_t;
+typedef __kernel_nlink_t nlink_t;
+typedef __kernel_off_t off_t;
+typedef __kernel_pid_t pid_t;
+typedef __kernel_daddr_t daddr_t;
+typedef __kernel_key_t key_t;
+typedef __kernel_suseconds_t suseconds_t;
+typedef __kernel_timer_t timer_t;
+typedef __kernel_clockid_t clockid_t;
+typedef __kernel_mqd_t mqd_t;
+typedef __kernel_uid_t uid_t;
+typedef __kernel_gid_t gid_t;
+typedef __kernel_loff_t loff_t;
+
+/*
+ * The following typedefs are also protected by individual ifdefs for
+ * historical reasons:
+ */
+#ifndef _SIZE_T
+#define _SIZE_T
+typedef __kernel_size_t size_t;
+#endif
+
+#ifndef _SSIZE_T
+#define _SSIZE_T
+typedef __kernel_ssize_t ssize_t;
+#endif
+
+#ifndef _PTRDIFF_T
+#define _PTRDIFF_T
+typedef __kernel_ptrdiff_t ptrdiff_t;
+#endif
+
+#ifndef _UINTPTR_T
+#define _UINTPTR_T
+typedef __kernel_uintptr_t uintptr_t;
+#endif
+
+#ifndef _TIME_T
+#define _TIME_T
+typedef __kernel_time_t time_t;
+#endif
+
+#ifndef _CLOCK_T
+#define _CLOCK_T
+typedef __kernel_clock_t clock_t;
+#endif
+
+#ifndef _CADDR_T
+#define _CADDR_T
+typedef __kernel_caddr_t caddr_t;
+#endif
+
+/* bsd */
+typedef unsigned char u_char;
+typedef unsigned short u_short;
+typedef unsigned int u_int;
+typedef unsigned long u_long;
+
+/* sysv */
+typedef unsigned char unchar;
+typedef unsigned short ushort;
+typedef unsigned int uint;
+typedef unsigned long ulong;
+
+#ifndef __BIT_TYPES_DEFINED__
+#define __BIT_TYPES_DEFINED__
+
+typedef __u8 u_int8_t;
+typedef __s8 int8_t;
+typedef __u16 u_int16_t;
+typedef __s16 int16_t;
+typedef __u32 u_int32_t;
+typedef __s32 int32_t;
+
+#endif /* !(__BIT_TYPES_DEFINED__) */
+
+/* user-space uses stdint.h for these */
+#ifdef __KERNEL__
+
+typedef __u8 uint8_t;
+typedef __u16 uint16_t;
+typedef __u32 uint32_t;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __u64 uint64_t;
+typedef __u64 u_int64_t;
+typedef __s64 int64_t;
+#endif
+
+#endif /* __KERNEL__ */
+
+/* this is a special 64bit data type that is 8-byte aligned */
+#define aligned_u64 unsigned long long __attribute__((aligned(8)))
+
+#endif /* __KERNEL_STRICT_NAMES */
+
+#ifdef __KERNEL__
+/* _Bool is a base type in C99... what a bad name! */
+typedef _Bool bool;
+#else
+#include <stdbool.h>
+#include <stdint.h>
+#endif
+
+/*
+ * Below are truly LWK-specific types that should never collide with
+ * any application/library that wants lwk/types.h.
+ */
+
+/* Address types */
+typedef __kernel_uintptr_t paddr_t; /* physical address */
+typedef __kernel_uintptr_t vaddr_t; /* virtual address */
+typedef __kernel_uintptr_t kaddr_t; /* kernel virtual address */
+typedef __kernel_uintptr_t uaddr_t; /* user virtual address */
+
+/* Locality group ID */
+typedef unsigned int lgroup_t;
+
+#ifdef __CHECKER__
+#define __bitwise__ __attribute__((bitwise))
+#else
+#define __bitwise__
+#endif
+#ifdef __CHECK_ENDIAN__
+#define __bitwise __bitwise__
+#else
+#define __bitwise
+#endif
+
+typedef __u16 __bitwise __le16;
+typedef __u16 __bitwise __be16;
+typedef __u32 __bitwise __le32;
+typedef __u32 __bitwise __be32;
+typedef __u64 __bitwise __le64;
+typedef __u64 __bitwise __be64;
+
+#ifdef __KERNEL__
+typedef unsigned __bitwise__ gfp_t;
+#endif
+
+struct ustat {
+ __kernel_daddr_t f_tfree;
+ __kernel_ino_t f_tinode;
+ char f_fname[6];
+ char f_fpack[6];
+};
+
+#endif /* _LWK_TYPES_H */
--- /dev/null
+#ifndef _LWK_UNISTD_H
+#define _LWK_UNISTD_H
+
+/*
+ * Include machine specific syscall numbers
+ */
+#include <arch/unistd.h>
+
+#endif /* _LWK_UNISTD_H */
--- /dev/null
+#ifndef _LWK_UTS_H
+#define _LWK_UTS_H
+
+#include <lwk/compile.h> /* for UTS_MACHINE and UTS_VERSION */
+
+/*
+ * Defines for what uname() should return
+ * We trick user-level into thinking we are Linux for compatibility purposes.
+ */
+#ifndef UTS_LINUX_SYSNAME
+#define UTS_LINUX_SYSNAME "Linux"
+#endif
+
+#ifndef UTS_LINUX_RELEASE
+#define UTS_LINUX_RELEASE "2.6.23"
+#endif
+
+#ifndef UTS_NODENAME
+#define UTS_NODENAME "(none)" /* set by sethostname() */
+#endif
+
+#ifndef UTS_DOMAINNAME
+#define UTS_DOMAINNAME "(none)" /* set by setdomainname() */
+#endif
+
+#endif
--- /dev/null
+#ifndef _LWK_UTSNAME_H
+#define _LWK_UTSNAME_H
+
+#define __UTS_LEN 64
+
+struct utsname {
+ char sysname[65];
+ char nodename[65];
+ char release[65];
+ char version[65];
+ char machine[65];
+ char domainname[65];
+};
+
+#endif /* _LWK_UTSNAME_H */
--- /dev/null
+#ifndef _LWK_WAITQ_H
+#define _LWK_WAITQ_H
+
+#include <lwk/spinlock.h>
+#include <lwk/list.h>
+#include <lwk/task.h>
+
+typedef struct waitq {
+ spinlock_t lock;
+ struct list_head waitq;
+} waitq_t;
+
+typedef struct waitq_entry {
+ struct task_struct * task;
+ struct list_head link;
+} waitq_entry_t;
+
+#define DECLARE_WAITQ(name) \
+ waitq_t name = { \
+ .lock = SPIN_LOCK_UNLOCKED, \
+ .waitq = { &(name).waitq, &(name).waitq } \
+ }
+
+#define DECLARE_WAITQ_ENTRY(name, tsk) \
+ waitq_entry_t name = { \
+ .task = tsk, \
+ .link = { &(name).link, &(name).link } \
+ }
+
+extern void waitq_init(waitq_t *waitq);
+extern void waitq_init_entry(waitq_entry_t *entry, struct task_struct *task);
+extern bool waitq_active(waitq_t *waitq);
+extern void waitq_add_entry(waitq_t *waitq, waitq_entry_t *entry);
+extern void waitq_prepare_to_wait(waitq_t *waitq, waitq_entry_t *entry,
+ taskstate_t state);
+extern void waitq_finish_wait(waitq_t *waitq, waitq_entry_t *entry);
+extern void waitq_wakeup(waitq_t *waitq);
+
+/**
+ * This puts the task to sleep until condition becomes true.
+ * This must be a macro because condition is tested repeatedly, not just
+ * when wait_event() is first called.
+ */
+#define wait_event(waitq, condition) \
+do { \
+ DECLARE_WAITQ_ENTRY(__entry, current); \
+ for (;;) { \
+ waitq_prepare_to_wait(&waitq, &__entry, \
+ TASKSTATE_UNINTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ schedule(); \
+ } \
+ waitq_finish_wait(&waitq, &__entry); \
+} while (0)
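+
+/*
+ * Usage sketch (illustrative): a consumer sleeps until a producer or
+ * interrupt handler sets a flag and calls waitq_wakeup():
+ *
+ *	wait_event(my_waitq, data_ready != 0);
+ *
+ * and on the producer side:
+ *
+ *	data_ready = 1;
+ *	waitq_wakeup(&my_waitq);
+ */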
+
+#endif
--- /dev/null
+#ifndef _LWK_XCALL_H
+#define _LWK_XCALL_H
+
+#include <lwk/cpumask.h>
+#include <lwk/idspace.h>
+#include <arch/xcall.h>
+
+int
+xcall_function(
+ cpumask_t cpu_mask,
+ void (*func)(void *info),
+ void * info,
+ bool wait
+);
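+
+/*
+ * Usage sketch (illustrative; target_cpus is a placeholder cpumask_t):
+ *
+ *	static void bump_counter(void *info)
+ *	{
+ *		...runs on each CPU in the mask...
+ *	}
+ *
+ *	xcall_function(target_cpus, bump_counter, NULL, true);
+ */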
+
+int
+arch_xcall_function(
+ cpumask_t cpu_mask,
+ void (*func)(void *info),
+ void * info,
+ bool wait
+);
+
+void
+xcall_reschedule(
+ id_t cpu
+);
+
+void
+arch_xcall_reschedule(
+ id_t cpu
+);
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2003 Cray, Inc.
+ *
+ * The contents of this file are proprietary information of Cray Inc.
+ * and may not be disclosed without prior written consent.
+ */
+/*
+ *
+ * This code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+
+#ifndef __CRAY_EVENT_DEF_H__
+#define __CRAY_EVENT_DEF_H__
+
+
+#include <lwk/types.h>
+#include <lwk/time.h>
+#include <rca/rca_defs.h>
+
+typedef union rs_node_u {
+ /* Little Endian */
+ struct {
+ uint32_t _node_arch : 2; /* System architecture */
+ uint32_t _node_type : 6; /* Component type */
+ uint32_t _node_state : 7; /* Component state from SM */
+ uint32_t _node_is_svc : 1; /* Service node bit */
+
+ uint32_t _node_id : 16; /* Node and Seastar NID */
+
+ uint32_t : 2; /* Unused */
+ uint32_t _node_x : 6; /* What position in the row */
+ uint32_t _node_subtype : 4; /* Component subtype */
+ uint32_t _node_row : 4; /* Which row of cabinets */
+
+ uint32_t _node_cage : 4; /* Cage in the cabinet */
+ uint32_t _node_slot : 4; /* Slot in the cage */
+ uint32_t _node_modcomp : 4; /* Component on a module */
+ uint32_t _node_link : 4; /* Link on seastar */
+ } __attribute__((packed)) rs_node_s;
+ struct {
+ uint32_t :2, :6, :7, :1, :16, :2, :6, :4, :4; /* Unused fields */
+ uint32_t _node_startx : 8; /* What position in the row */
+ uint32_t _node_endx : 8; /* Which row of cabinets */
+ } __attribute__((packed)) rs_node_s1;
+ uint64_t rs_node_flat;
+} __attribute__((packed)) rs_node_t;
+
+/* TODO: this and the RCA RS_MSG_LEN define need to be taken out soon. */
+#ifndef RS_MSG_LEN
+#define RS_MSG_LEN 1
+#endif
+
+typedef uint32_t rs_error_code_t;
+typedef int32_t rs_event_code_t;
+
+
+// from rs_svc_id.h
+
+/* NOTE for the following event related structures:
+ * ###################################################################
+ * The following restrictions apply to the L0 Opteron communication
+ * related structures.
+ * The elements must be aligned on 4-byte boundaries. The structure
+ * size must be a multiple of 4 bytes. Structures should be packed so
+ * that the compiler will not insert padding.
+ * ###################################################################
+ */
+typedef uint32_t rs_service_t;
+typedef uint32_t rs_instance_t;
+typedef uint32_t rs_priority_t;
+typedef uint32_t rs_flag_t;
+
+/*
+ * NOTE: This rs_service_id_t is packed. If we update this structure,
+ * we need to make sure that each element is 4-byte aligned,
+ * otherwise it might break the L0 Opteron communication (size
+ * of rs_service_id_t must be a multiple of 4bytes).
+ */
+typedef struct rs_service_id_s {
+ rs_instance_t svid_inst; /* a sequence identifier */
+ rs_service_t svid_type; /* the kind of service */
+ rs_node_t svid_node; /* the x.y.z coordinates */
+} __attribute__((packed)) rs_service_id_t;
+
+
+/* time structure
+ * rt_tv1 and rt_tv2 are hedges against field size inflation.
+ */
+typedef union rs_time_u {
+ struct timeval _rt_tv;
+ struct {
+ uint64_t _rt_tv1;
+ uint64_t _rt_tv2;
+ } rs_evtime_s; /* timeval needs to be adjusted for 32/64 bits */
+} rs_time_t;
+
+/*
+ * NOTE: This rs_event_t is packed. If we update this structure, we need to
+ * make sure that each element is 4-byte aligned, otherwise it might
+ * break the L0 Opteron communication (size of rs_event_t must be a
+ * multiple of 4bytes).
+ *
+ * event structure:
+ * may be used as a fixed or variable length event.
+ * In RCA's case, ev_data is fixed length and RS_MSG_LEN should be defined
+ * before inclusion of this file. ev_len here signifies the length in
+ * bytes of significant data in ev_data. The SMW, in contrast, treats events
+ * as variable length; RS_MSG_LEN is 1 and the actual length of the data is
+ * determined when the object is allocated. In this case the real length
+ * of ev_data is stored in ev_len. RCA-related events have fixed-length
+ * ev_data and RS_MSG_LEN is 256 (a multiple of 4 bytes). As with the SMW
+ * events, the real length of ev_data must be stored in ev_len.
+ */
+typedef struct rs_event_s {
+ rs_event_code_t ev_id; /* type of event */
+ uint32_t ev_seqnum; /* req/rsp sequence number */
+ rs_service_id_t ev_gen; /* what this event pertains to */
+ rs_service_id_t ev_src; /* creator of this event */
+ rs_flag_t ev_flag; /* any bit flags */
+ rs_time_t _ev_stp; /* time of event creation */
+ rs_priority_t ev_priority; /* priority [0 low, 9 high] */
+ int32_t ev_len; /* length of data */
+ char ev_data[RS_MSG_LEN]; /* payload (must be last) */
+} __attribute__((packed)) rs_event_t;
+
+#define rs_sizeof_event(data_length) \
+ (((int)(&((rs_event_t*)0)->ev_data)) + (data_length))
+
+#endif /* __CRAY_EVENT_DEF_H__ */
+
--- /dev/null
+/*
+ * Copyright (c) 2006 Cray, Inc.
+ *
+ * The contents of this file are proprietary information of Cray Inc.
+ * and may not be disclosed without prior written consent.
+ */
+/*
+ *
+ * This code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+
+#ifndef __L0RCA_CONFIG_H__
+#define __L0RCA_CONFIG_H__
+
+
+#include <rca/cray_event_def.h> /* rca_types.h includes rs_event.h */
+
+/*
+ * PKT_MODE* Registers:
+ *
+ * Writing a "0" to a bit in pkt_mode0 will atomically clear that bit
+ * in pkt_mode. Writing a "1" to a bit in pkt_mode1 will atomically
+ * set that bit in pkt_mode. Reading any of these will return the
+ * instantanious contents of pkt_mode. Writing pkt_mode replaces the
+ * contents non-atomically.
+ */
+typedef struct {
+ uint32_t pkt_mode;
+ uint32_t _pad1;
+ uint32_t pkt_mode0; /* atomically clear bits */
+ uint32_t _pad2;
+ uint32_t pkt_mode1; /* atomically set bits */
+} l0ssi_pkt_mode_t;
+
+#define phy_pkt_mode (*(l0ssi_pkt_mode_t *)PKT_MODE)
+
+#define PKT_MODE0 (uint32_t)&phy_pkt_mode.pkt_mode0
+#define PKT_MODE1 (uint32_t)&phy_pkt_mode.pkt_mode1
+
+/*
+ * How the RCA interrupts the L0.
+ *
+ * Writing a "0" to a bit in pkt_mode0 will atomically CLEAR that bit
+ * in pkt_mode. Writing a "1" to a bit in pkt_mode1 will atomically
+ * SET that bit in pkt_mode. Normally, pkt_mode should never be
+ * written directly. It can be read to get the current state of bits.
+ * In general, any set bits in pkt_mode will cause an interrupt to
+ * be raised to the L0, via the SSI and L0_FPGA.
+ *
+ * CAUTION: pkt_mode0 must only be written with "0"(s) in the
+ * position(s) to be cleared and "1"s everywhere else. I.e. it
+ * must be written with the value one would use to AND-out the
+ * bits. This is contrary to most SET/CLEAR register implementations.
+ *
+ * Bits in pkt_mode are assigned in l0ssi_intr.h
+ */
+typedef l0ssi_pkt_mode_t l0rca_intr_t;
+#define l0r_intr_get pkt_mode
+#define l0r_intr_clr pkt_mode0
+#define l0r_intr_set pkt_mode1
+
+/* Should be removed... */
+#define L0RCA_CONFIG L0RCA_CFG
+
+/* defined channel id */
+#define L0RCA_CH_EV_UP 0
+#define L0RCA_CH_EV_DOWN 1
+#define L0RCA_CH_CON_UP 2
+#define L0RCA_CH_CON_DOWN 3
+#define L0RCA_CH_KGDB_UP 4
+#define L0RCA_CH_KGDB_DOWN 5
+#define NUM_L0RCA_CHANNELS 6
+
+/* NOTE for the following L0 Opteron communication related structures:
+ * ###################################################################
+ * The following restrictions apply to the L0 Opteron communication
+ * related structures.
+ * The elements must be aligned on 4-byte boundaries. The structure
+ * size must be a multiple of 4 bytes. Structures should be packed so
+ * that the compiler will not insert padding.
+ * ###################################################################
+ */
+
+/*
+ * l0rca_ch_data_t: channel buffer data structure
+ * NOTE: This l0rca_ch_data_t is packed. If we update this structure,
+ * we need to make sure that each element is 4-byte aligned,
+ * otherwise it might break the L0-Opteron communication (the size of
+ * l0rca_ch_data_t must be a multiple of 4 bytes).
+ * All communication channels use rs_event_t, so the size of an object
+ * in the buffer is sizeof(rs_event_t). RCA events have a fixed ev_data
+ * length (256), and num_obj is the number of events that can be stored
+ * in the buffer.
+ *
+ * The *_intr_bit fields declare which bit in the PKT_MODE register
+ * is used for the channel interrupt. l0_intr_bit is for interrupts
+ * sent *to* the L0, while proc_intr_bit is for interrupts sent *to*
+ * the Opteron processor.
+ */
+typedef struct l0rca_ch_data_s {
+ uint32_t num_obj; /* number of objects */
+ uint32_t ridx; /* read index */
+ uint32_t widx; /* write index */
+ uint32_t l0_intr_bit; /* Opteron -> L0 intr assignment */
+ uint32_t proc_intr_bit; /* L0 -> Opteron intr assignment */
+} __attribute__((packed)) l0rca_ch_data_t;
+
+#define L0RCA_CONF_VERSION 2
+
+/*
+ * Circular Buffer Usage:
+ *
+ * When (widx == ridx), the buffer is empty;
+ * else, when (widx - ridx < num_obj), there are one or more
+ * free slots available for writing.
+ *
+ * ridx and widx reflect the object index, not byte index.
+ *
+ * Restrictions:
+ *
+ * num_obj must be a power of 2 (i.e. (num_obj & (num_obj - 1)) == 0).
+ * Therefore indices are normalized with AND: idx = ridx & (num_obj - 1).
+ *
+ */
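+
+/*
+ * Worked example (illustrative only, assuming num_obj == 8):
+ *
+ *   empty:      widx == ridx
+ *   full:       widx - ridx == num_obj
+ *   slot used:  buf[widx & (num_obj - 1)]
+ *
+ * With ridx == 6 and widx == 9 there are 3 unread events and the next
+ * write lands in slot (9 & 7) == 1.
+ */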
+
+/*
+ * NOTE: This l0rca_config_t is packed. If we update this structure,
+ * we need to make sure that each element is 4-byte aligned,
+ * otherwise it might break the L0-Opteron communication (the size
+ * of l0rca_config_t must be a multiple of 4 bytes).
+ *
+ * configuration data structure */
+typedef struct l0rca_config_s {
+ uint64_t l0rca_buf_addr; /* ch buffer addr */
+ uint64_t l0rca_l0_intr_addr; /* interrupt to L0 */
+ uint32_t version; /* config version */
+ rs_node_t proc_id; /* node id */
+ int32_t proc_num; /* proc number (0-3) */
+ int32_t reserved_1; /* reserved for future use */
+ int32_t reserved_2; /* reserved for future use */
+ /* channel data */
+ l0rca_ch_data_t chnl_data[NUM_L0RCA_CHANNELS];
+} __attribute__((packed)) l0rca_config_t;
+
+
+/*
+ * Definitions in the L0-reserved area of SIC RAM
+ */
+#define L0_SIC_RAM 0xfffff000
+#define L0_SIC_RAM_LEN 4096
+
+#define COLDSPIN 0xfffffe80
+#define L0RCA_CFG L0_SIC_RAM
+#define L0RCA_CFG_LEN (COLDSPIN - L0RCA_CFG)
+
+/*
+ * The following provides an abstraction for accessing the RAM
+ * location of the config structure. It maps phy_l0r_cfg onto the
+ * l0rca_config_t structure at the physical address L0RCA_CFG.
+ * To use it, include this header file and then access the
+ * config area as &phy_l0r_cfg.<element>.
+ */
+#define phy_l0r_cfg (*(l0rca_config_t *)L0RCA_CFG)
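+
+/*
+ * Illustrative sketch (assumes the caller has a valid mapping for the
+ * L0_SIC_RAM region, e.g. the static mapping used by rca_l0_comm_va()
+ * in rca_l0_linux.h):
+ *
+ *   uint32_t ver  = phy_l0r_cfg.version;
+ *   uint32_t nobj = phy_l0r_cfg.chnl_data[L0RCA_CH_EV_UP].num_obj;
+ */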
+
+
+#endif /* !__L0RCA_CONFIG_H__ */
--- /dev/null
+/*
+ * Copyright (c) 2003 Cray, Inc.
+ *
+ * The contents of this file are proprietary information of Cray Inc.
+ * and may not be disclosed without prior written consent.
+ */
+/*
+ * This code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+
+
+#ifndef __RCA_DEFS_H__
+#define __RCA_DEFS_H__
+
+
+// Definitions moved from rs_event_name.h
+/* Console log */
+#define RS_CONSOLE_LOG (28)
+/* Debug */
+#define RS_CONSOLE_INPUT (51)
+#define RS_KGDB_INPUT (52)
+#define RS_KGDB_OUTPUT (53)
+
+#define RS_DBG_CLASS 0x00010000
+#define RS_LOG_CLASS 0x00001000
+
+/* Console log */
+#define ec_console_log (RS_LOG_CLASS | RS_CONSOLE_LOG)
+
+/* Debug */
+#define ec_console_input (RS_DBG_CLASS | RS_CONSOLE_INPUT)
+#define ec_kgdb_input (RS_DBG_CLASS | RS_KGDB_INPUT)
+#define ec_kgdb_output (RS_DBG_CLASS | RS_KGDB_OUTPUT)
+
+#define RS_RCA_SVC_CLASS 7 /* RCA service class */
+/* service type class bits */
+#define RS_CLASS_BITS 8
+#define RS_CLASS_MASK ((1 << RS_CLASS_BITS) - 1)
+
+#define RS_SUBCLASS_BITS 24
+#define RS_SUBCLASS_MASK ((1 << RS_SUBCLASS_BITS) -1)
+
+/* generate service type */
+#define RCA_MAKE_SERVICE_INDEX(class, subclass) \
+ ( (((class)&RS_CLASS_MASK) << RS_SUBCLASS_BITS) | \
+ ((subclass) & RS_SUBCLASS_MASK) )
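+
+/*
+ * Worked example (illustrative): with RS_CLASS_BITS == 8 and
+ * RS_SUBCLASS_BITS == 24,
+ *
+ *   RCA_MAKE_SERVICE_INDEX(RS_RCA_SVC_CLASS, 6)
+ *     == ((7 & 0xff) << 24) | (6 & 0xffffff)
+ *     == 0x07000006
+ *
+ * which is exactly how RCA_SVCTYPE_CONS below is built.
+ */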
+
+
+/* macro for setting up service id */
+#define RS_MKSVC(i, t, n) (rs_service_id_t){(i), (t), (n)}
+
+
+/* need to set RS_MSG_LEN before including rs_event.h */
+#define RS_MSG_LEN 256
+
+#define RCA_SVC_CLASS RS_RCA_SVC_CLASS /* 7 */
+
+#define RCA_CLASS_BITS RS_CLASS_BITS
+#define RCA_CLASS_MASK RS_CLASS_MASK
+
+/* number of bits client may use in subclass */
+#define RCA_SUBCLASS_BITS RS_SUBCLASS_BITS
+#define RCA_SUBCLASS_MASK RS_SUBCLASS_MASK
+
+#define RCA_INST_ANY 0xffffffffUL
+
+/* system console log */
+#define RCA_SVCTYPE_CONS RCA_MAKE_SERVICE_INDEX(RCA_SVC_CLASS, 6)
+#define RCA_SVCTYPE_TEST0 RCA_MAKE_SERVICE_INDEX(RCA_SVC_CLASS, 10)
+
+/* rs_service_id_t constants and helpers */
+#define RCA_MKSVC(i, t, n) RS_MKSVC((i), (t), (n))
+
+#define RCA_LOG_DEBUG 7
+
+#endif /* !__RCA_DEFS_H__ */
--- /dev/null
+/*
+ * Copyright (c) 2003 Cray Inc.
+ *
+ * The contents of this file is proprietary information of Cray Inc.
+ * and may not be disclosed without prior written consent.
+ *
+ */
+/*
+ * This code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#ifndef __RCA_L0_H__
+#define __RCA_L0_H__
+
+#include <rca/l0rca_config.h>
+
+
+/*
+ * Macros to read/write Seastar Scratch RAM for everyone else.
+ */
+#define SSMEMPUT(dest,src,nb) memcpy((void *)dest,(void *)src,nb)
+#define SSMEMGET(dest,src,nb) memcpy((void *)dest,(void *)src,nb)
+
+#define SSPUT64(to_ptr,value) (*(to_ptr) = (value))
+#define SSPUT32(to_ptr,value) (*(to_ptr) = (value))
+
+#define SSGET64(from_ptr,to_var) ((to_var) = *(from_ptr))
+#define SSGET32(from_ptr,to_var) ((to_var) = *(from_ptr))
+
+/* TODO - Revisit these later */
+#define LOCK_CHANNEL(chn_num)
+#define UNLOCK_CHANNEL(chn_num)
+
+typedef int (*l0rca_down_handle_t)(int chn_num, rs_event_t* buf, int32_t num);
+typedef int (*l0rca_up_handle_t)(int chn_num);
+
+typedef struct l0rca_ch_status {
+ uint32_t num_obj; /* number of objects */
+ uint32_t ridx; /* read index */
+ uint32_t widx; /* write index */
+ uint32_t reg_count;
+ rs_event_t* ch_buf_ptr;
+} l0rca_ch_status_t;
+
+/*
+ * API defines
+ * TODO - All API calls defined here may not be implemented in l0rca.c.
+ * These are to be implemented as needed.
+ */
+
+/* NOTE
+ * download means data transfer from the L0 to the Opteron
+ * upload means data transfer from the Opteron to the L0.
+ */
+
+#ifdef STANDALONE_DIAGS
+/*
+ * Checks if the channel is ready or not (full).
+ * Argument: int channel
+ * Returns:
+ * 1 if ready (not_full)
+ * 0 if not ready
+ */
+int l0rca_ch_send_ready(int ch_num);
+
+/*
+ * Clears l0rca_early_cfg.initialized.
+ * This function is required for memtest. Memtest has to move the location
+ * of the storage area for the config in order to move on to the next
+ * region to do the memory test.
+ */
+void l0rca_clear_initialized(void);
+#endif
+
+/*
+ * Function: l0rca_init_config
+ *
+ * Description: Read L0 - RCA communication config structure and populate
+ * our personal copy. If there is any error, the OS panics
+ * as not being able to communicate with L0 is a total disaster.
+ * If already initialized, it returns silently.
+ *
+ * Arguments: None.
+ *
+ * Returns: None
+ */
+void l0rca_init_config(void);
+
+/*
+ * Function: register_ch_down
+ *
+ * Description: Register function for the download channel. It is expected that
+ * there be at most one registered user for a download channel. This user
+ * provides a callback to be invoked when data from L0 arrives on the channel.
+ *
+ * Arguments: int ch_num IN: channel number to register on
+ * l0rca_down_handle_t handler IN: callback routine
+ * int poll IN: if > zero - duration in ms to check for event
+ * if = zero - event arrival is interrupt driven.
+ * if < zero - do nothing. It is assumed that the user
+ * has her own means to check for event arrival.
+ *
+ * Returns: EBUSY - If another user is already registered.
+ * zero (SUCCESS) otherwise.
+ */
+int register_ch_down(int ch_num, l0rca_down_handle_t handler, int poll);
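+
+/*
+ * Illustrative sketch (hypothetical handler; not part of the API):
+ *
+ *   static int my_rx(int chn_num, rs_event_t *buf, int32_t num)
+ *   {
+ *           // consume 'num' events starting at 'buf'
+ *           return 0;
+ *   }
+ *
+ *   // interrupt-driven delivery on the event download channel
+ *   if (register_ch_down(L0RCA_CH_EV_DOWN, my_rx, 0) != 0)
+ *           // channel already claimed by another user
+ */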
+
+/*
+ * Function: unregister_ch_down
+ *
+ * Description: Unregister function for the download channel. Use to indicate
+ * that the channel is no longer to be used.
+ *
+ * Arguments: int ch_num IN: channel number to unregister
+ *
+ * Returns: zero (SUCCESS)
+ */
+int unregister_ch_down(int ch_num);
+
+/*
+ * Function: register_ch_up
+ *
+ * Description: Register function for the upload channel. It is expected that
+ * there be at most one registered user for an upload channel. This user
+ * provides a callback to be invoked when the buffer drains below tshhld
+ * (only if the buffer became full last time the tshhld was crossed)
+ *
+ * Arguments: int ch_num IN: channel number to register on
+ * l0rca_up_handle_t handler IN: callback routine
+ * int tshhld IN: buffer to drain before invoking callback; ignored
+ * if poll is negative.
+ * int poll IN: if > zero - duration in ms to check for buffer drain
+ * if = zero - tx done interrupt invokes callback
+ * if < zero - do nothing. It is assumed that the user
+ * has her own means to check for buffer drain
+ *
+ * Returns: -EBUSY - If another user is already registered.
+ * -EINVAL - if ch_num is not in range.
+ * zero (SUCCESS) otherwise.
+ */
+int register_ch_up(int ch_num, l0rca_up_handle_t handler, int tshhld, int poll);
+
+/*
+ * Function: unregister_ch_up
+ *
+ * Description: Unregister function for the upload channel. Use to indicate
+ * that the channel is no longer to be used.
+ *
+ * Arguments: int ch_num IN: channel number to unregister
+ *
+ * Returns: zero (SUCCESS)
+ */
+int unregister_ch_up(int ch_num);
+
+/*
+ * Function: ch_send_data
+ *
+ * Description: Sends data towards the L0.
+ * The data that buf points to is sent as the payload in an rs_event structure.
+ * The header is a separate parameter and the send routine directly copies
+ * the header and the data into the circular buffer, thus avoiding a copy.
+ *
+ * Arguments: int ch_num IN: channel number on which to send data
+ * rs_event_t *ev_hdr IN: Header without len & timestamp
+ * void* buf IN: Buffer with data
+ * unsigned int len IN: length of data to transfer
+ *
+ * Returns: EBUSY - If the circular channel buffer is full.
+ * EINVAL - if no user registered on the channel (Debug only)
+ * EFAULT - if buf or ev_hdr is NULL (Debug only)
+ * E2BIG - if len exceeds max event payload (RS_MSG_LEN) (Debug only)
+ * zero (SUCCESS) otherwise.
+ *
+ * Notes: data in buf will be copied to the channel buffer, therefore, upon
+ * return, user can free the buf.
+ */
+int ch_send_data(int ch_num, const rs_event_t *ev_hdr,
+ void* buf, unsigned int len);
+
+/*
+ * Function: ch_send_event
+ *
+ * Description: Sends an event to L0.
+ *
+ * Arguments: int ch_num IN: channel number on which to send the event
+ * const rs_event_t *evp IN: Event to send
+ *
+ * Returns: -EINVAL - if no user registered on the channel (Debug only)
+ * -EFAULT - if ev_hdr is NULL (Debug only)
+ * zero - SUCCESS, event sent.
+ * +EBUSY - Event not sent. Sender should retry.
+ *
+ * Notes: The event will be copied to the channel buffer, therefore, upon
+ * return, user may free the space associated with the event
+ */
+int ch_send_event(int ch_num, const rs_event_t *evp);
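+
+/*
+ * Illustrative retry sketch (hypothetical; a real caller may instead
+ * defer and rely on the tx-done callback installed via register_ch_up()):
+ *
+ *   int rc;
+ *   do {
+ *           rc = ch_send_event(L0RCA_CH_EV_UP, &ev);
+ *   } while (rc == EBUSY);
+ */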
+
+/*
+ * Function: ch_status
+ *
+ * Description: Obtain status on the channel
+ *
+ * Arguments: int ch_num IN: channel number for which to obtain status
+ * Arguments: l0rca_ch_status_t *st OUT: status of the channel.
+ *
+ * Returns: zero (SUCCESS).
+ *
+ * Notes: The status represents the snapshot at the time of invocation.
+ */
+int ch_status(int ch_num, l0rca_ch_status_t *st);
+
+/*
+ * Function: l0rca_ch_get_event
+ *
+ * Description: Read the next available event (if any). This allows the caller
+ * to check for incoming events. It is useful for those callers
+ * that do not have a receive callback.
+ *
+ * Arguments: int ch_num IN: channel number from which to return event
+ * Arguments: rs_event_t *evp: Pointer where the event may be placed
+ *
+ * Returns: 0 or 1 - number of events returned (0 means no event; otherwise 1)
+ * < 0 - errors such as channel not registered etc.
+ *
+ * Note: The receive callback is the preferred way to handle incoming events.
+ * This API call should only be used in cases where a receive callback
+ * mechanism is not feasible. For example, when interrupts are disabled and
+ * incoming events need to be serviced. An example user is a kernel
+ * debugger.
+ */
+int l0rca_ch_get_event(int ch_num, rs_event_t *evp);
+
+/*
+ * Function: l0rca_poll_callback
+ *
+ * Description: Scan the incoming channels and call the receive callback
+ * (if any) in case an event is pending to be processed.
+ * Update the read pointer. Next scan the outgoing channels
+ * and if the channel was full, call the transmit done callback
+ * so that events may be sent.
+ *
+ * Arguments: None
+ *
+ * Returns: 0 if no events were processed, else 1.
+ *
+ * Note: It is possible that this routine is called from interrupt
+ * context. The callbacks invoked *must* not block.
+ */
+int l0rca_poll_callback(void);
+
+/*
+ * Function: l0rca_get_proc_id
+ *
+ * Description: Return the node/processor id.
+ *
+ * Arguments: None
+ *
+ * Returns: The proc id.
+ */
+rs_node_t l0rca_get_proc_id(void);
+
+/*
+ * Function: l0rca_get_max_xyz
+ *
+ * Description: Returns the current system dimensions. This information
+ * can be used to find the coordinates of the node in the system.
+ *
+ * Arguments: int32_t *mx OUT: The x value is stored here after return
+ * int32_t *my OUT: The y value is stored here after return
+ * int32_t *mz OUT: The z value is stored here after return
+ *
+ * Returns: No return value.
+ */
+void l0rca_get_max_xyz(int32_t *mx, int32_t *my, int32_t *mz);
+
+/*
+ * Function: l0rca_event_data
+ *
+ * Description: Return a pointer to the data portion and length of the
+ * data portion of the event.
+ *
+ * Arguments: rs_event_t *evp IN: Event whose data is of interest
+ * void **data OUT: Upon return will point to data portion of event
+ * int32_t *len OUT: Upon return will have the length of the data
+ * portion of the event
+ *
+ * Returns: No Return Value.
+ */
+void l0rca_event_data(rs_event_t *evp, void **data, int32_t *len);
+
+
+#ifdef __KERNEL__
+extern int l0_gdb_init(void);
+extern int l0rca_kgdb_down_getc(void);
+extern int gdb_hook(void);
+extern int gdb_getc(int wait);
+extern int gdb_putc(char chr);
+extern int putDebugPacket(char *buf, int n);
+#endif
+
+#endif /* __RCA_L0_H__ */
--- /dev/null
+/*
+ * Copyright (c) 2004 Cray Inc.
+ *
+ * The contents of this file is proprietary information of Cray Inc.
+ * and may not be disclosed without prior written consent.
+ *
+ */
+/*
+ * This code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+
+#ifndef __RCA_L0_LINUX_H__
+#define __RCA_L0_LINUX_H__
+
+#include <lwk/version.h>
+
+/*
+ * LINUX:
+ * This works as long as the physical address is below 4GB and a static
+ * page table mapping has been setup for this address. This macro is
+ * intended to be used before ioremap() is available, e.g., in the case
+ * of early_printk.
+ */
+#define rca_l0_comm_va(addr) \
+ (void*)(((unsigned long)0xFFFFFFFF << 32) | (unsigned long)(addr))
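+
+/*
+ * Illustrative sketch (assumes <rca/l0rca_config.h> is also included
+ * for L0RCA_CFG): the config area sits at physical 0xfffff000, below
+ * 4GB, so before ioremap() is usable it can be reached as
+ *
+ *   l0rca_config_t *cfg = rca_l0_comm_va(L0RCA_CFG);
+ *
+ * which simply folds the physical address into the statically mapped
+ * 0xFFFFFFFFxxxxxxxx region described above.
+ */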
+
+extern int l0rca_os_init(void);
+
+#endif /* __RCA_L0_LINUX_H__ */
--- /dev/null
+config KALLSYMS
+ bool
+ default "y"
+ help
+ Say Y here to let the kernel print out symbolic crash information and
+ symbolic stack backtraces. This increases the size of the kernel
+ somewhat, as all symbols have to be loaded into the kernel image.
+
+config KALLSYMS_ALL
+ bool
+ default "y"
+ help
+ Normally kallsyms only contains the symbols of functions, for nicer
+ OOPS messages. Some debuggers can use kallsyms for other
+ symbols too: say Y here to include all symbols, if you need them
+ and you don't care about adding to the size of your kernel.
+
--- /dev/null
+#
+# Makefile for the LWK.
+#
+
+obj-y := main.o version.o
+
+# files to be removed upon make clean
+clean-files := ../include/lwk/compile.h
+
+# dependencies on generated files need to be listed explicitly
+
+$(obj)/version.o: include/lwk/compile.h
+
+
+# compile.h changes depending on hostname, generation number, etc,
+# so we regenerate it always.
+# mkcompile_h will make sure to only update the
+# actual file if its content has changed.
+
+include/lwk/compile.h: FORCE
+ @echo ' CHK $@'
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
+ "$(UTS_MACHINE)" "$(CC) $(CFLAGS)"
--- /dev/null
+#include <lwk/init.h>
+#include <lwk/kernel.h>
+#include <lwk/params.h>
+#include <lwk/console.h>
+#include <lwk/cpuinfo.h>
+#include <lwk/percpu.h>
+#include <lwk/smp.h>
+#include <lwk/cpuinfo.h>
+#include <lwk/delay.h>
+#include <lwk/bootmem.h>
+#include <lwk/aspace.h>
+#include <lwk/task.h>
+#include <lwk/sched.h>
+#include <lwk/timer.h>
+
+/**
+ * Pristine copy of the LWK boot command line.
+ */
+char lwk_command_line[COMMAND_LINE_SIZE];
+
+
+/**
+ * This is the architecture-independent kernel entry point. Before it is
+ * called, architecture-specific code has done the bare minimum initialization
+ * necessary. This function initializes the kernel and its various subsystems.
+ * It calls back to architecture-specific code at several well defined points,
+ * which all architectures must implement (e.g., setup_arch()).
+ */
+void
+start_kernel()
+{
+ unsigned int cpu;
+ unsigned int timeout;
+ int status;
+
+ /*
+ * Parse the kernel boot command line.
+ * This is where boot-time configurable variables get set,
+ * e.g., the ones with param() and driver_param() specifiers.
+ */
+ parse_params(lwk_command_line);
+
+ /*
+ * Initialize the console subsystem.
+ * printk()'s will be visible after this.
+ */
+ console_init();
+
+ /*
+ * Hello, Dave.
+ */
+ printk(lwk_banner);
+ printk(KERN_DEBUG "%s\n", lwk_command_line);
+
+ /*
+ * Do architecture specific initialization.
+ * This detects memory, CPUs, etc.
+ */
+ setup_arch();
+
+ /*
+ * Initialize the kernel memory subsystem. Up until now, the simple
+ * boot-time memory allocator (bootmem) has been used for all dynamic
+ * memory allocation. Here, the bootmem allocator is destroyed and all
+ * of the free pages it was managing are added to the kernel memory
+ * pool (kmem) or the user memory pool (umem).
+ *
+ * After this point, any use of the bootmem allocator will cause a
+ * kernel panic. The normal kernel memory subsystem API should be used
+ * instead (e.g., kmem_alloc() and kmem_free()).
+ */
+ mem_subsys_init();
+
+ /*
+ * Initialize the address space management subsystem.
+ */
+ aspace_subsys_init();
+
+ /*
+ * Initialize the task management subsystem.
+ */
+ task_subsys_init();
+
+ /*
+ * Initialize the task scheduling subsystem.
+ */
+ sched_subsys_init();
+
+ /*
+	 * Initialize the timer subsystem.
+ */
+ timer_subsys_init();
+
+ /*
+ * Boot all of the other CPUs in the system, one at a time.
+ */
+ printk(KERN_INFO "Number of CPUs detected: %d\n", num_cpus());
+ for_each_cpu_mask(cpu, cpu_present_map) {
+ /* The bootstrap CPU (that's us) is already booted. */
+ if (cpu == 0) {
+ cpu_set(cpu, cpu_online_map);
+ continue;
+ }
+
+ printk(KERN_DEBUG "Booting CPU %u.\n", cpu);
+ arch_boot_cpu(cpu);
+
+ /* Wait for ACK that CPU has booted (5 seconds max). */
+ for (timeout = 0; timeout < 50000; timeout++) {
+ if (cpu_isset(cpu, cpu_online_map))
+ break;
+ udelay(100);
+ }
+
+ if (!cpu_isset(cpu, cpu_online_map))
+ panic("Failed to boot CPU %d.\n", cpu);
+ }
+
+ /*
+ * Start up user-space...
+ */
+ printk(KERN_INFO "Loading initial user-level task (init_task)...\n");
+ if ((status = create_init_task()) != 0)
+ panic("Failed to create init_task (status=%d).", status);
+
+ schedule(); /* This should not return */
+ BUG();
+}
--- /dev/null
+/*
+ * linux/init/version.c
+ *
+ * Copyright (C) 1992 Theodore Ts'o
+ *
+ * May be freely distributed as part of Linux.
+ */
+
+#include <lwk/compile.h>
+#include <lwk/version.h>
+#include <lwk/uts.h>
+#include <lwk/utsname.h>
+
+const char lwk_banner[] =
+ "LWK version " UTS_RELEASE " (" LWK_COMPILE_BY "@"
+ LWK_COMPILE_HOST ") (" LWK_COMPILER ") " UTS_VERSION
+ "\n";
+
+/**
+ * User-level apps call the uname() system call to figure out basic
+ * information about the system they are running on, as indicated
+ * by this structure. We report back that we are Linux since that
+ * is what standard Linux executables expect (UTS_LINUX_SYSNAME and
+ * UTS_LINUX_RELEASE).
+ */
+struct utsname linux_utsname = {
+ .sysname = UTS_LINUX_SYSNAME,
+ .nodename = UTS_NODENAME,
+ .release = UTS_LINUX_RELEASE,
+ .version = UTS_VERSION,
+ .machine = UTS_MACHINE,
+ .domainname = UTS_DOMAINNAME
+};
+
--- /dev/null
+obj-y := console.o printk.o spinlock.o params.o driver.o cpuinfo.o panic.o \
+ resource.o kallsyms.o extable.o show.o elf.o time.o xcall.o \
+ idspace.o htable.o elf_liblwk.o task.o sched.o waitq.o \
+ timer.o init_task.o
+obj-y += linux_syscalls/
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/console.h>
+#include <lwk/spinlock.h>
+#include <lwk/params.h>
+#include <lwk/driver.h>
+#include <lwk/errno.h>
+#include <arch/uaccess.h>
+
+/**
+ * List of all registered consoles in the system.
+ *
+ * Kernel messages output via printk() will be written to
+ * all consoles in this list.
+ */
+static LIST_HEAD(console_list);
+
+/**
+ * Serializes access to the console.
+ */
+static DEFINE_SPINLOCK(console_lock);
+
+/**
+ * Holds a comma separated list of consoles to configure.
+ */
+static char console_str[128];
+param_string(console, console_str, sizeof(console_str));
+
+/**
+ * Registers a new console.
+ */
+void console_register(struct console *con)
+{
+ list_add(&con->next, &console_list);
+}
+
+/**
+ * Writes a string to all registered consoles.
+ */
+void console_write(const char *str)
+{
+ struct console *con;
+ unsigned long flags;
+
+ spin_lock_irqsave(&console_lock, flags);
+ list_for_each_entry(con, &console_list, next)
+ con->write(con, str);
+ spin_unlock_irqrestore(&console_lock, flags);
+}
+
+/**
+ * Initializes the console subsystem; called once at boot.
+ */
+void console_init(void)
+{
+ char *p, *con;
+
+ // console_str contains comma separated list of console
+ // driver names. Try to install a driver for each
+ // console name con.
+ p = con = console_str;
+ while (*p != '\0') {
+ if (*p == ',') {
+ *p = '\0'; // null terminate con
+ if (driver_init_by_name(con))
+ printk(KERN_WARNING
+ "failed to install console=%s\n", con);
+ con = p + 1;
+ }
+ ++p;
+ }
+
+ // Catch the last one
+ if (p != console_str)
+ driver_init_by_name(con);
+}
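+
+/*
+ * Example (illustrative, with hypothetical driver names): booting with
+ * "console=vga,serial" on the kernel command line makes console_init()
+ * call driver_init_by_name("vga") and then driver_init_by_name("serial");
+ * any name present in the kernel's __driver_table works.
+ */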
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/cpuinfo.h>
+
+/**
+ * Info structure for each CPU in the system.
+ * Array is indexed by logical CPU ID.
+ */
+struct cpuinfo cpu_info[NR_CPUS];
+
+/**
+ * Map of all available CPUs.
+ * This map represents logical CPU IDs.
+ */
+cpumask_t cpu_present_map;
+
+/**
+ * Map of all booted CPUs.
+ * This map represents logical CPU IDs.
+ * It will be a subset of cpu_present_map (usually identical after boot).
+ */
+cpumask_t cpu_online_map;
+
+/**
+ * Prints the input cpuinfo structure to the console.
+ */
+void print_cpuinfo(struct cpuinfo *c)
+{
+ printk("logical cpu id\t: %u\n", c->logical_id);
+ print_arch_cpuinfo(c);
+}
+
--- /dev/null
+#include <lwk/driver.h>
+#include <lwk/string.h>
+
+/**
+ * Searches for the specified driver name and calls its init() function.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+int driver_init_by_name(const char *name)
+{
+ unsigned int i;
+ struct driver_info * drvs = __start___driver_table;
+ unsigned int num_drvs = __stop___driver_table
+ - __start___driver_table;
+
+ for (i = 0; i < num_drvs; i++) {
+ if (strcmp(name, drvs[i].name) == 0) {
+ if (drvs[i].init_called)
+ return -1;
+ drvs[i].init_called = 1;
+ drvs[i].init();
+ return 0;
+ }
+ }
+ return -1;
+}
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/cpuinfo.h>
+#include <lwk/elf.h>
+#include <arch/uaccess.h>
+
+int
+elf_hwcap(id_t cpu, uint32_t *hwcap)
+{
+ if (!cpu_isset(cpu, cpu_online_map))
+ return -ENOENT;
+ *hwcap = ELF_HWCAP(cpu);
+ return 0;
+}
+
+int
+sys_elf_hwcap(id_t cpu, uint32_t __user *hwcap)
+{
+ int status;
+ uint32_t _hwcap;
+
+ if ((status = elf_hwcap(cpu, &_hwcap)) != 0)
+ return status;
+
+ if (hwcap && copy_to_user(hwcap, &_hwcap, sizeof(_hwcap)))
+ return -EINVAL;
+
+ return 0;
+}
--- /dev/null
+../user/liblwk/elf.c
\ No newline at end of file
--- /dev/null
+/* Rewritten by Rusty Russell, on the backs of many others...
+ Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+#include <lwk/init.h>
+#include <lwk/extable.h>
+#include <arch/uaccess.h>
+#include <arch/sections.h>
+
+extern struct exception_table_entry __start___ex_table[];
+extern struct exception_table_entry __stop___ex_table[];
+
+/* Sort the kernel's built-in exception table */
+void __init sort_exception_table(void)
+{
+ sort_extable(__start___ex_table, __stop___ex_table);
+}
+
+/* Given an address, look for it in the exception table. */
+const struct exception_table_entry *search_exception_table(unsigned long addr)
+{
+ const struct exception_table_entry *e;
+
+ e = search_extable(__start___ex_table, __stop___ex_table-1, addr);
+ return e;
+}
+
+int core_kernel_text(unsigned long addr)
+{
+ if (addr >= (unsigned long)_stext &&
+ addr <= (unsigned long)_etext)
+ return 1;
+
+ if (addr >= (unsigned long)_sinittext &&
+ addr <= (unsigned long)_einittext)
+ return 1;
+ return 0;
+}
+
+int __kernel_text_address(unsigned long addr)
+{
+ if (core_kernel_text(addr))
+ return 1;
+ return 0;
+}
+
+int kernel_text_address(unsigned long addr)
+{
+ if (core_kernel_text(addr))
+ return 1;
+ return 0;
+}
--- /dev/null
+/* Copyright (c) 2008, Sandia National Laboratories */
+
+#include <lwk/kernel.h>
+#include <lwk/list.h>
+#include <lwk/htable.h>
+#include <lwk/hash.h>
+
+struct htable {
+ size_t tbl_order;
+ struct hlist_head * tbl;
+ size_t obj_key_offset;
+ size_t obj_link_offset;
+ size_t num_entries;
+};
+
+static id_t
+obj2key(const struct htable *ht, const void *obj)
+{
+ return *((id_t *)((uintptr_t)obj + ht->obj_key_offset));
+}
+
+static struct hlist_node *
+obj2node(const struct htable *ht, const void *obj)
+{
+ return (struct hlist_node *)((uintptr_t)obj + ht->obj_link_offset);
+}
+
+static void *
+node2obj(const struct htable *ht, const struct hlist_node *node)
+{
+ return (void *)((uintptr_t)node - ht->obj_link_offset);
+}
+
+static id_t
+node2key(const struct htable *ht, const struct hlist_node *node)
+{
+ return obj2key(ht, node2obj(ht, node));
+}
+
+static struct hlist_head *
+key2head(const struct htable *ht, id_t key)
+{
+ return &ht->tbl[hash_long(key, ht->tbl_order)];
+}
+
+static struct hlist_head *
+obj2head(const struct htable *ht, const void *obj)
+{
+ return &ht->tbl[hash_long(obj2key(ht, obj), ht->tbl_order)];
+}
+
+int
+htable_create(size_t tbl_order,
+ size_t obj_key_offset, size_t obj_link_offset, htable_t *tbl)
+{
+ struct htable *ht;
+ size_t tbl_size;
+
+ if (!(ht = kmem_alloc(sizeof(*ht))))
+ return -ENOMEM;
+
+ ht->tbl_order = tbl_order;
+ tbl_size = (1 << tbl_order);
+
+	if (!(ht->tbl = kmem_alloc(tbl_size * sizeof(struct hlist_head)))) {
+		kmem_free(ht);
+		return -ENOMEM;
+	}
+
+ ht->obj_key_offset = obj_key_offset;
+ ht->obj_link_offset = obj_link_offset;
+ ht->num_entries = 0;
+
+ *tbl = ht;
+ return 0;
+}
+
+int
+htable_destroy(htable_t tbl)
+{
+ struct htable *ht = tbl;
+ if (ht->num_entries)
+ return -EEXIST;
+ kmem_free(ht->tbl);
+ kmem_free(ht);
+ return 0;
+}
+
+int
+htable_add(htable_t tbl, void *obj)
+{
+ struct htable *ht = tbl;
+ hlist_add_head(obj2node(ht, obj), obj2head(ht, obj));
+ ++ht->num_entries;
+ return 0;
+}
+
+int
+htable_del(htable_t tbl, void *obj)
+{
+ struct htable *ht = tbl;
+ struct hlist_node *node;
+ hlist_for_each(node, obj2head(ht, obj)) {
+ if (obj == node2obj(ht, node)) {
+ hlist_del(node);
+ --ht->num_entries;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+void *
+htable_lookup(htable_t tbl, id_t key)
+{
+ struct htable *ht = tbl;
+ struct hlist_node *node;
+ hlist_for_each(node, key2head(ht, key)) {
+ if (key == node2key(ht, node))
+ return node2obj(ht, node);
+ }
+ return NULL;
+}
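+
+/*
+ * Illustrative use (hypothetical object type; real users embed the key
+ * and the hlist_node in their own structures):
+ *
+ *   struct widget {
+ *           id_t              id;
+ *           struct hlist_node link;
+ *   };
+ *
+ *   htable_t tbl;
+ *   htable_create(7,                             // 2^7 buckets
+ *                 offsetof(struct widget, id),
+ *                 offsetof(struct widget, link),
+ *                 &tbl);
+ *   htable_add(tbl, &my_widget);
+ *   struct widget *w = htable_lookup(tbl, my_widget.id);
+ */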
--- /dev/null
+/* Copyright (c) 2008, Sandia National Laboratories */
+
+#include <lwk/kernel.h>
+#include <lwk/idspace.h>
+#include <lwk/log2.h>
+#include <arch/page.h>
+
+struct idspace {
+ id_t min_id;
+ id_t max_id;
+ size_t size;
+ size_t ids_in_use;
+ size_t offset;
+ void * bitmap;
+};
+
+static size_t
+calc_order(struct idspace *idspace)
+{
+ size_t pages = DIV_ROUND_UP(idspace->size, PAGE_SIZE * 8);
+ return roundup_pow_of_two(pages);
+}
+
+int
+idspace_create(id_t min_id, id_t max_id, idspace_t *idspace)
+{
+ struct idspace *spc;
+
+ if ((min_id == ANY_ID) || (max_id == ANY_ID))
+ return -EINVAL;
+
+ if (min_id > max_id)
+ return -EINVAL;
+
+ if (!idspace)
+ return -EINVAL;
+
+ if (!(spc = kmem_alloc(sizeof(*spc))))
+ return -ENOMEM;
+
+ spc->min_id = min_id;
+ spc->max_id = max_id;
+ spc->size = max_id - min_id + 1;
+ spc->ids_in_use = 0;
+ spc->offset = 0;
+
+ if (!(spc->bitmap = kmem_get_pages(calc_order(spc)))) {
+ kmem_free(spc);
+ return -ENOMEM;
+ }
+
+ *idspace = spc;
+
+ return 0;
+}
+
+int
+idspace_destroy(idspace_t idspace)
+{
+ struct idspace *spc = idspace;
+
+ if (!spc)
+ return -EINVAL;
+
+ kmem_free_pages(spc->bitmap, calc_order(spc));
+ kmem_free(spc);
+
+ return 0;
+}
+
+int
+idspace_alloc_id(idspace_t idspace, id_t request, id_t *id)
+{
+ struct idspace *spc = idspace;
+ unsigned int bit;
+
+ if (!spc)
+ return -EINVAL;
+
+ if ((request != ANY_ID) &&
+ ((request < spc->min_id) || (request > spc->max_id)))
+ return -EINVAL;
+
+ if (spc->size == spc->ids_in_use)
+ return -ENOENT;
+
+ if (request == ANY_ID) {
+ /* Allocate any available id */
+ bit = find_next_zero_bit(spc->bitmap, spc->size, spc->offset);
+ /* Handle wrap-around */
+ if (bit == spc->size)
+ bit = find_next_zero_bit(spc->bitmap, spc->offset, 0);
+ /* Next time start looking at the next id */
+ spc->offset = bit + 1;
+ if (spc->offset == spc->size)
+ spc->offset = 0;
+ } else {
+ /* Allocate a specific ID */
+ bit = request - spc->min_id;
+ }
+
+ if (test_and_set_bit(bit, spc->bitmap))
+ return -EBUSY;
+
+ ++spc->ids_in_use;
+ if (id)
+ *id = bit + spc->min_id;
+
+ return 0;
+}
+
+int
+idspace_free_id(idspace_t idspace, id_t id)
+{
+ struct idspace *spc = idspace;
+ unsigned int bit;
+
+ if (!spc)
+ return -EINVAL;
+
+ if ((id == ANY_ID) || (id < spc->min_id) || (id > spc->max_id))
+ return -EINVAL;
+
+ bit = id - spc->min_id;
+ if (test_and_clear_bit(bit, spc->bitmap) == 0)
+ return -ENOENT;
+
+ --spc->ids_in_use;
+
+ return 0;
+}
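+
+/*
+ * Illustrative use (values are hypothetical):
+ *
+ *   idspace_t ids;
+ *   id_t id;
+ *
+ *   idspace_create(1, 4095, &ids);          // manage IDs 1..4095
+ *   idspace_alloc_id(ids, ANY_ID, &id);     // grab any free ID
+ *   idspace_alloc_id(ids, 7, NULL);         // reserve a specific ID
+ *   idspace_free_id(ids, id);
+ *   idspace_destroy(ids);
+ */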
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/smp.h>
+#include <lwk/params.h>
+#include <lwk/init.h>
+#include <lwk/elf.h>
+
+/**
+ * Maximum number of arguments and environment variables that may
+ * be passed to the init_task.
+ */
+#define INIT_MAX_ARGC 32
+#define INIT_MAX_ENVC 32
+
+/**
+ * Maximum length of the init_argv= and init_envp= strings on the
+ * kernel boot command line.
+ */
+#define INIT_ARGV_LEN 1024
+#define INIT_ENVP_LEN 1024
+
+/**
+ * Amount of memory to reserve for the init_task's heap.
+ */
+unsigned long init_heap_size = (1024 * 1024 * 4); /* 4 MB */
+param(init_heap_size, ulong);
+
+/**
+ * Amount of memory to reserve for the init_task's stack.
+ */
+unsigned long init_stack_size = (1024 * 256); /* 256 KB */
+param(init_stack_size, ulong);
+
+/**
+ * Arguments to pass to the init_task.
+ */
+static char init_argv_str[INIT_ARGV_LEN] = { 0 };
+param_string(init_argv, init_argv_str, sizeof(init_argv_str));
+
+/**
+ * Environment to pass to the init_task.
+ */
+static char init_envp_str[INIT_ENVP_LEN] = { 0 };
+param_string(init_envp, init_envp_str, sizeof(init_envp_str));
+
+/**
+ * Creates the init_task.
+ */
+int
+create_init_task(void)
+{
+ int status;
+ start_state_t start_state;
+
+ if (!init_elf_image) {
+ printk("No init_elf_image found.\n");
+ return -EINVAL;
+ }
+
+ /* Initialize the start_state fields that we know up-front */
+ start_state.uid = 0;
+ start_state.gid = 0;
+ start_state.cpu_id = this_cpu;
+ start_state.cpumask = NULL;
+
+ /* This initializes start_state aspace_id, entry_point, and stack_ptr */
+ status =
+ elf_load(
+ __va(init_elf_image),
+ init_elf_image,
+ "init_task",
+ INIT_ASPACE_ID,
+ PAGE_SIZE,
+ init_heap_size,
+ init_stack_size,
+ init_argv_str,
+ init_envp_str,
+ &start_state,
+ 0,
+ &elf_dflt_alloc_pmem
+ );
+ if (status) {
+ printk("Failed to load init_task (status=%d).\n", status);
+ return status;
+ }
+
+ /* This prevents the address space from being deleted by
+ * user-space, since the kernel never releases this reference */
+ if (!aspace_acquire(INIT_ASPACE_ID)) {
+ printk("Failed to acquire INIT_ASPACE_ID.\n");
+		return -ENOENT;
+ }
+
+ return task_create(INIT_TASK_ID, "init_task", &start_state, NULL);
+}
--- /dev/null
+/*
+ * kallsyms.c: in-kernel printing of symbolic oopses and stack traces.
+ *
+ * Rewritten and vastly simplified by Rusty Russell for in-kernel
+ * module loader:
+ * Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
+ *
+ * ChangeLog:
+ *
+ * (25/Aug/2004) Paulo Marques <pmarques@grupopie.com>
+ * Changed the compression method from stem compression to "table lookup"
+ * compression (see scripts/kallsyms.c for a more complete description)
+ */
+#include <lwk/kernel.h>
+#include <lwk/kallsyms.h>
+#include <lwk/init.h>
+#include <lwk/string.h>
+#include <arch/sections.h>
+
+#ifdef CONFIG_KALLSYMS_ALL
+#define all_var 1
+#else
+#define all_var 0
+#endif
+
+/**
+ * These will be re-linked against their
+ * real values during the second link stage
+ */
+extern unsigned long kallsyms_addresses[] __attribute__((weak));
+extern unsigned long kallsyms_num_syms __attribute__((weak,section("data")));
+extern u8 kallsyms_names[] __attribute__((weak));
+
+extern u8 kallsyms_token_table[] __attribute__((weak));
+extern u16 kallsyms_token_index[] __attribute__((weak));
+
+extern unsigned long kallsyms_markers[] __attribute__((weak));
+
+static inline int is_kernel_inittext(unsigned long addr)
+{
+ if (addr >= (unsigned long)_sinittext
+ && addr <= (unsigned long)_einittext)
+ return 1;
+ return 0;
+}
+
+static inline int is_kernel_extratext(unsigned long addr)
+{
+ if (addr >= (unsigned long)_sextratext
+ && addr <= (unsigned long)_eextratext)
+ return 1;
+ return 0;
+}
+
+static inline int is_kernel_text(unsigned long addr)
+{
+ if (addr >= (unsigned long)_stext && addr <= (unsigned long)_etext)
+ return 1;
+ return 0;
+}
+
+static inline int is_kernel(unsigned long addr)
+{
+ if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
+ return 1;
+ return 0;
+}
+
+/**
+ * Expand a compressed symbol data into the resulting uncompressed string,
+ * given the offset to where the symbol is in the compressed stream.
+ */
+static unsigned int kallsyms_expand_symbol(unsigned int off, char *result)
+{
+ int len, skipped_first = 0;
+ u8 *tptr, *data;
+
+ /* get the compressed symbol length from the first symbol byte */
+ data = &kallsyms_names[off];
+ len = *data;
+ data++;
+
+ /* update the offset to return the offset for the next symbol on
+ * the compressed stream */
+ off += len + 1;
+
+ /* for every byte on the compressed symbol data, copy the table
+ entry for that byte */
+ while(len) {
+ tptr = &kallsyms_token_table[ kallsyms_token_index[*data] ];
+ data++;
+ len--;
+
+ while (*tptr) {
+ if(skipped_first) {
+ *result = *tptr;
+ result++;
+ } else
+ skipped_first = 1;
+ tptr++;
+ }
+ }
+
+ *result = '\0';
+
+ /* return to offset to the next symbol */
+ return off;
+}
+
+/**
+ * Find the offset in the compressed stream given an index into the
+ * kallsyms array.
+ */
+static unsigned int get_symbol_offset(unsigned long pos)
+{
+ u8 *name;
+ int i;
+
+ /* use the closest marker we have. We have markers every 256 positions,
+ * so that should be close enough */
+ name = &kallsyms_names[ kallsyms_markers[pos>>8] ];
+
+ /* sequentially scan all the symbols up to the point we're searching
+ * for. Every symbol is stored in a [<len>][<len> bytes of data]
+ * format, so we just need to add the len to the current pointer for
+ * every symbol we wish to skip */
+ for(i = 0; i < (pos&0xFF); i++)
+ name = name + (*name) + 1;
+
+ return name - kallsyms_names;
+}
+
+/**
+ * Lookup the address for this symbol. Returns 0 if not found.
+ */
+unsigned long kallsyms_lookup_name(const char *name)
+{
+ char namebuf[KSYM_NAME_LEN+1];
+ unsigned long i;
+ unsigned int off;
+
+ for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
+ off = kallsyms_expand_symbol(off, namebuf);
+
+ if (strcmp(namebuf, name) == 0)
+ return kallsyms_addresses[i];
+ }
+ return 0;
+}
+
+/**
+ * Lookup the symbol name corresponding to a kernel address
+ */
+const char *kallsyms_lookup(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset,
+ char *namebuf)
+{
+ unsigned long i, low, high, mid;
+
+	/* This kernel should never have been booted. */
+ BUG_ON(!kallsyms_addresses);
+
+ namebuf[KSYM_NAME_LEN] = 0;
+ namebuf[0] = 0;
+
+ if ((all_var && is_kernel(addr)) ||
+ (!all_var && (is_kernel_text(addr) || is_kernel_inittext(addr) ||
+ is_kernel_extratext(addr)))) {
+ unsigned long symbol_end = 0;
+
+ /* do a binary search on the sorted kallsyms_addresses array */
+ low = 0;
+ high = kallsyms_num_syms;
+
+ while (high-low > 1) {
+ mid = (low + high) / 2;
+ if (kallsyms_addresses[mid] <= addr) low = mid;
+ else high = mid;
+ }
+
+ /* search for the first aliased symbol. Aliased symbols are
+ symbols with the same address */
+ while (low && kallsyms_addresses[low - 1] ==
+ kallsyms_addresses[low])
+ --low;
+
+ /* Grab name */
+ kallsyms_expand_symbol(get_symbol_offset(low), namebuf);
+
+ /* Search for next non-aliased symbol */
+ for (i = low + 1; i < kallsyms_num_syms; i++) {
+ if (kallsyms_addresses[i] > kallsyms_addresses[low]) {
+ symbol_end = kallsyms_addresses[i];
+ break;
+ }
+ }
+
+ /* if we found no next symbol, we use the end of the section */
+ if (!symbol_end) {
+ if (is_kernel_inittext(addr))
+ symbol_end = (unsigned long)_einittext;
+ else
+ symbol_end = (all_var) ? (unsigned long)_end
+ : (unsigned long)_etext;
+ }
+
+ if (symbolsize)
+ *symbolsize = symbol_end - kallsyms_addresses[low];
+ if (offset)
+ *offset = addr - kallsyms_addresses[low];
+ return namebuf;
+ }
+
+ return NULL;
+}
+
--- /dev/null
+obj-y := uname.o brk.o write.o id.o fstat.o mmap.o \
+ gettimeofday.o settimeofday.o nanosleep.o sched_yield.o \
+ time.o
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/task.h>
+#include <lwk/aspace.h>
+
+unsigned long
+sys_brk(unsigned long brk)
+{
+ struct aspace *as = current->aspace;
+
+ if ((brk >= as->heap_start) && (brk < as->mmap_brk))
+ as->brk = brk;
+
+ return as->brk;
+}
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/stat.h>
+#include <arch/uaccess.h>
+
+long
+sys_fstat(unsigned int fd, struct stat __user *statbuf)
+{
+ struct stat tmp;
+
+	/* For now only allow fstat()'ing stdout (fd 1) */
+ if (fd != 1)
+ return -EBADF;
+
+ /* TODO: Fix this! */
+ tmp.st_dev = 11;
+ tmp.st_ino = 9;
+ tmp.st_mode = 0x2190;
+ tmp.st_nlink = 1;
+ tmp.st_uid = 0;
+ tmp.st_gid = 0;
+ tmp.st_rdev = 34823;
+ tmp.st_size = 0;
+ tmp.st_blksize = 1024;
+ tmp.st_blocks = 0;
+ tmp.st_atime = 1204772189;
+ tmp.st_mtime = 1204772189;
+ tmp.st_ctime = 1204769465;
+
+ return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
+}
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/time.h>
+#include <arch/uaccess.h>
+
+int
+sys_gettimeofday(
+ struct timeval __user * tv,
+ struct timezone __user * tz
+)
+{
+ struct timeval _tv;
+ struct timezone _tz;
+
+ if (tv != NULL) {
+ uint64_t now = get_time(); /* nanoseconds */
+
+ _tv.tv_sec = now / NSEC_PER_SEC;
+		_tv.tv_usec = (now % NSEC_PER_SEC) / NSEC_PER_USEC;
+
+ if (copy_to_user(tv, &_tv, sizeof(_tv)))
+ return -EFAULT;
+ }
+
+ if (tz != NULL) {
+ _tz.tz_minuteswest = 0;
+ _tz.tz_dsttime = 0;
+
+ if (copy_to_user(tz, &_tz, sizeof(_tz)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/task.h>
+
+long
+sys_getuid(void)
+{
+ return current->uid;
+}
+
+long
+sys_getgid(void)
+{
+ return current->gid;
+}
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/task.h>
+#include <lwk/aspace.h>
+#include <arch/mman.h>
+
+long
+sys_mmap(
+ unsigned long addr,
+ unsigned long len,
+ unsigned long prot,
+ unsigned long flags,
+ unsigned long fd,
+ unsigned long off
+)
+{
+ unsigned long mmap_brk;
+
+ /* For now we only support private/anonymous mappings */
+ if (!(flags & MAP_PRIVATE) || !(flags & MAP_ANONYMOUS))
+ return -EINVAL;
+
+ if (len != round_up(len, PAGE_SIZE))
+ return -EINVAL;
+
+ mmap_brk = current->aspace->mmap_brk;
+ mmap_brk = round_down(mmap_brk - len, PAGE_SIZE);
+
+ /* Protect against extending into the UNIX data segment */
+ if (mmap_brk <= current->aspace->brk)
+ return -ENOMEM;
+
+ current->aspace->mmap_brk = mmap_brk;
+ return mmap_brk;
+}
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/time.h>
+#include <lwk/timer.h>
+#include <arch/uaccess.h>
+
+int
+sys_nanosleep(const struct timespec __user *req, struct timespec __user *rem)
+{
+ struct timespec _req, _rem;
+ uint64_t when, remain;
+
+ if (copy_from_user(&_req, req, sizeof(_req)))
+ return -EFAULT;
+
+ if (!timespec_is_valid(&_req))
+ return -EINVAL;
+
+ when = get_time() + (_req.tv_sec * NSEC_PER_SEC) + _req.tv_nsec;
+ remain = timer_sleep_until(when);
+
+ if (remain && rem) {
+ _rem.tv_sec = remain / NSEC_PER_SEC;
+ _rem.tv_nsec = remain % NSEC_PER_SEC;
+
+ if (copy_to_user(rem, &_rem, sizeof(_rem)))
+ return -EFAULT;
+ }
+
+ return (remain) ? -ERESTART_RESTARTBLOCK : 0;
+}
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/task.h>
+
+int
+sys_sched_yield(void)
+{
+ return task_yield();
+}
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/time.h>
+#include <arch/uaccess.h>
+
+int
+sys_settimeofday(
+ struct timeval __user * tv,
+ struct timezone __user * tz
+)
+{
+ struct timeval _tv;
+ struct timezone _tz;
+
+ if (tv != NULL) {
+ if (copy_from_user(&_tv, tv, sizeof(_tv)))
+ return -EFAULT;
+
+ set_time(
+ (_tv.tv_sec * NSEC_PER_SEC) + (_tv.tv_usec * NSEC_PER_USEC)
+ );
+ }
+
+ if (tz != NULL) {
+ if (copy_from_user(&_tz, tz, sizeof(_tz)))
+ return -EFAULT;
+
+ /* Only support setting timezone to 0 */
+ if ((_tz.tz_minuteswest != 0) || (_tz.tz_dsttime != 0))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/time.h>
+#include <arch/uaccess.h>
+
+time_t
+sys_time(time_t __user *t)
+{
+ time_t now_sec = (time_t)(get_time() / NSEC_PER_SEC);
+
+ if (t && copy_to_user(t, &now_sec, sizeof(now_sec)))
+ return -EFAULT;
+
+ return now_sec;
+}
--- /dev/null
+#include <lwk/kernel.h>
+#include <arch/uaccess.h>
+
+long
+sys_uname(struct utsname __user *name)
+{
+ int err = copy_to_user(name, &linux_utsname, sizeof(*name));
+ return err ? -EFAULT : 0;
+}
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/task.h>
+#include <arch/uaccess.h>
+
+ssize_t
+sys_write(unsigned int fd, const char __user * buf, size_t count)
+{
+ char kbuf[512];
+ size_t kcount = count;
+
+ /* For now we only support stdout console output */
+ if (fd != 1)
+ return -EBADF;
+
+ /* Protect against overflowing the kernel buffer */
+ if (kcount >= sizeof(kbuf))
+ kcount = sizeof(kbuf) - 1;
+
+ /* Copy the user-level string to a kernel buffer */
+ if (copy_from_user(kbuf, buf, kcount))
+ return -EFAULT;
+ kbuf[kcount] = '\0';
+
+ /* Write the string to the local console */
+ printk(KERN_USERMSG
+ "(%s) %s%s",
+ current->name,
+ kbuf,
+ (kcount != count) ? " <TRUNCATED>" : ""
+ );
+
+ /* Return number of characters actually printed */
+ return kcount;
+}
+
--- /dev/null
+#include <lwk/kernel.h>
+
+/**
+ * Scream and die.
+ */
+void panic(const char * fmt, ...)
+{
+ static char buf[1024];
+ va_list args;
+
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+ printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
+
+ while (1) {}
+}
--- /dev/null
+/* Helpers for initial module or kernel cmdline parsing
+ Copyright (C) 2001 Rusty Russell.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+#include <lwk/kernel.h>
+#include <lwk/string.h>
+#include <lwk/errno.h>
+#include <lwk/params.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt, a...)
+#endif
+
+static inline char dash2underscore(char c)
+{
+ if (c == '-')
+ return '_';
+ return c;
+}
+
+static inline int parameq(const char *input, const char *paramname)
+{
+ unsigned int i;
+ for (i = 0; dash2underscore(input[i]) == paramname[i]; i++)
+ if (input[i] == '\0')
+ return 1;
+ return 0;
+}
+
+static int parse_one(char *param,
+ char *val,
+ struct kernel_param *params,
+ unsigned num_params,
+ int (*handle_unknown)(char *param, char *val))
+{
+ unsigned int i;
+	DEBUGP("in parse_one(%s, %s)\n", param, val);
+
+ /* Find parameter */
+ for (i = 0; i < num_params; i++) {
+ if (parameq(param, params[i].name)) {
+ DEBUGP("They are equal! Calling %p\n",
+ params[i].set);
+			return params[i].set(val, &params[i]);
+ }
+ }
+
+ if (handle_unknown) {
+ DEBUGP("Unknown argument: calling %p\n", handle_unknown);
+ return handle_unknown(param, val);
+ }
+
+ /* Ignore unknown args if no handle_unknown function specified */
+ printk("Unknown argument `%s'\n", param);
+ return 0;
+}
+
+/* You can use " around spaces, but can't escape ". */
+/* Hyphens and underscores equivalent in parameter names. */
+static char *next_arg(char *args, char **param, char **val)
+{
+ unsigned int i, equals = 0;
+ int in_quote = 0, quoted = 0;
+ char *next;
+
+ if (*args == '"') {
+ args++;
+ in_quote = 1;
+ quoted = 1;
+ }
+
+ for (i = 0; args[i]; i++) {
+ if (args[i] == ' ' && !in_quote)
+ break;
+ if (equals == 0) {
+ if (args[i] == '=')
+ equals = i;
+ }
+ if (args[i] == '"')
+ in_quote = !in_quote;
+ }
+
+ *param = args;
+ if (!equals)
+ *val = NULL;
+ else {
+ args[equals] = '\0';
+ *val = args + equals + 1;
+
+ /* Don't include quotes in value. */
+ if (**val == '"') {
+ (*val)++;
+ if (args[i-1] == '"')
+ args[i-1] = '\0';
+ }
+ if (quoted && args[i-1] == '"')
+ args[i-1] = '\0';
+ }
+
+ if (args[i]) {
+ args[i] = '\0';
+ next = args + i + 1;
+ } else
+ next = args + i;
+
+ /* Chew up trailing spaces. */
+ while (*next == ' ')
+ next++;
+ return next;
+}
+
+/* Args looks like "foo=bar,bar2 baz=fuz wiz". */
+int parse_args(const char *name,
+ char *args,
+ struct kernel_param *params,
+ unsigned num,
+ int (*unknown)(char *param, char *val))
+{
+ char *param, *val;
+
+ DEBUGP("Parsing ARGS: %s\n", args);
+
+ /* Chew leading spaces */
+ while (*args == ' ')
+ args++;
+
+ while (*args) {
+ int ret;
+
+		args = next_arg(args, &param, &val);
+ ret = parse_one(param, val, params, num, unknown);
+ switch (ret) {
+ case -ENOENT:
+ printk(KERN_ERR "%s: Unknown parameter `%s'\n",
+ name, param);
+ return ret;
+ case -ENOSPC:
+ printk(KERN_ERR
+ "%s: `%s' too large for parameter `%s'\n",
+ name, val ?: "", param);
+ return ret;
+ case 0:
+ break;
+ default:
+ printk(KERN_ERR
+ "%s: `%s' invalid for parameter `%s'\n",
+ name, val ?: "", param);
+ return ret;
+ }
+ }
+
+ /* All parsed OK. */
+ return 0;
+}
+
+/* Lazy bastard, eh? */
+#define STANDARD_PARAM_DEF(name, type, format, tmptype, strtolfn) \
+ int param_set_##name(const char *val, struct kernel_param *kp) \
+ { \
+ char *endp; \
+ tmptype l; \
+ \
+ if (!val) return -EINVAL; \
+ l = strtolfn(val, &endp, 0); \
+ if (endp == val || ((type)l != l)) \
+ return -EINVAL; \
+ *((type *)kp->arg) = l; \
+ return 0; \
+ } \
+ int param_get_##name(char *buffer, struct kernel_param *kp) \
+ { \
+ return sprintf(buffer, format, *((type *)kp->arg)); \
+ }
+
+STANDARD_PARAM_DEF(byte, unsigned char, "%c", unsigned long, simple_strtoul);
+STANDARD_PARAM_DEF(short, short, "%hi", long, simple_strtol);
+STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, simple_strtoul);
+STANDARD_PARAM_DEF(int, int, "%i", long, simple_strtol);
+STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, simple_strtoul);
+STANDARD_PARAM_DEF(long, long, "%li", long, simple_strtol);
+STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, simple_strtoul);
+
+int param_set_charp(const char *val, struct kernel_param *kp)
+{
+ if (!val) {
+ printk(KERN_ERR "%s: string parameter expected\n",
+ kp->name);
+ return -EINVAL;
+ }
+
+ if (strlen(val) > 1024) {
+ printk(KERN_ERR "%s: string parameter too long\n",
+ kp->name);
+ return -ENOSPC;
+ }
+
+ *(char **)kp->arg = (char *)val;
+ return 0;
+}
+
+int param_get_charp(char *buffer, struct kernel_param *kp)
+{
+ return sprintf(buffer, "%s", *((char **)kp->arg));
+}
+
+int param_set_bool(const char *val, struct kernel_param *kp)
+{
+ /* No equals means "set"... */
+ if (!val) val = "1";
+
+ /* One of =[yYnN01] */
+ switch (val[0]) {
+ case 'y': case 'Y': case '1':
+ *(int *)kp->arg = 1;
+ return 0;
+ case 'n': case 'N': case '0':
+ *(int *)kp->arg = 0;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+int param_get_bool(char *buffer, struct kernel_param *kp)
+{
+ /* Y and N chosen as being relatively non-coder friendly */
+ return sprintf(buffer, "%c", (*(int *)kp->arg) ? 'Y' : 'N');
+}
+
+int param_set_invbool(const char *val, struct kernel_param *kp)
+{
+ int boolval, ret;
+ struct kernel_param dummy = { .arg = &boolval };
+
+ ret = param_set_bool(val, &dummy);
+ if (ret == 0)
+ *(int *)kp->arg = !boolval;
+ return ret;
+}
+
+int param_get_invbool(char *buffer, struct kernel_param *kp)
+{
+ int val;
+ struct kernel_param dummy = { .arg = &val };
+
+ val = !*(int *)kp->arg;
+ return param_get_bool(buffer, &dummy);
+}
+
+/* We cheat here and temporarily mangle the string. */
+static int _param_array(const char *name,
+ const char *val,
+ unsigned int min, unsigned int max,
+ void *elem, int elemsize,
+ int (*set)(const char *, struct kernel_param *kp),
+ int *num)
+{
+ int ret;
+ struct kernel_param kp;
+ char save;
+
+ /* Get the name right for errors. */
+ kp.name = name;
+ kp.arg = elem;
+
+ /* No equals sign? */
+ if (!val) {
+ printk(KERN_ERR "%s: expects arguments\n", name);
+ return -EINVAL;
+ }
+
+ *num = 0;
+ /* We expect a comma-separated list of values. */
+ do {
+ int len;
+
+ if (*num == max) {
+ printk(KERN_ERR "%s: can only take %i arguments\n",
+ name, max);
+ return -EINVAL;
+ }
+ len = strcspn(val, ",");
+
+ /* nul-terminate and parse */
+ save = val[len];
+ ((char *)val)[len] = '\0';
+ ret = set(val, &kp);
+
+ if (ret != 0)
+ return ret;
+ kp.arg += elemsize;
+ val += len+1;
+ (*num)++;
+ } while (save == ',');
+
+ if (*num < min) {
+ printk(KERN_ERR "%s: needs at least %i arguments\n",
+ name, min);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int param_array_set(const char *val, struct kernel_param *kp)
+{
+ struct kparam_array *arr = kp->arg;
+ unsigned int temp_num;
+
+ return _param_array(kp->name, val, 1, arr->max, arr->elem,
+ arr->elemsize, arr->set, arr->num ?: &temp_num);
+}
+
+int param_array_get(char *buffer, struct kernel_param *kp)
+{
+ int i, off, ret;
+ struct kparam_array *arr = kp->arg;
+ struct kernel_param p;
+
+ p = *kp;
+ for (i = off = 0; i < (arr->num ? *arr->num : arr->max); i++) {
+ if (i)
+ buffer[off++] = ',';
+ p.arg = arr->elem + arr->elemsize * i;
+ ret = arr->get(buffer + off, &p);
+ if (ret < 0)
+ return ret;
+ off += ret;
+ }
+ buffer[off] = '\0';
+ return off;
+}
+
+int param_set_copystring(const char *val, struct kernel_param *kp)
+{
+ struct kparam_string *kps = kp->arg;
+
+ if (strlen(val)+1 > kps->maxlen) {
+ printk(KERN_ERR "%s: string doesn't fit in %u chars.\n",
+ kp->name, kps->maxlen-1);
+ return -ENOSPC;
+ }
+ strcpy(kps->string, val);
+ return 0;
+}
+
+int param_get_string(char *buffer, struct kernel_param *kp)
+{
+ struct kparam_string *kps = kp->arg;
+ return strlcpy(buffer, kps->string, kps->maxlen);
+}
+
+/**
+ * Parses all parameters from the input string.
+ */
+int parse_params(const char *str)
+{
+ struct kernel_param * params = __start___param;
+ unsigned int num_params = __stop___param - __start___param;
+ char tmp[2048];
+
+ // Make a temporary copy of the string since parse_args modifies it
+ if (strlen(str)+1 > sizeof(tmp)) {
+ printk(KERN_ERR "parse_params: input string too large");
+ return -ENOSPC;
+ }
+
+ strcpy(tmp, str);
+ return parse_args("Parsing Arguments", tmp, params, num_params, NULL);
+}
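+
+/*
+ * Illustrative example (hypothetical command line): a variable declared
+ * elsewhere as
+ *
+ *   unsigned long init_heap_size = ...;
+ *   param(init_heap_size, ulong);
+ *
+ * is updated when the boot command line contains
+ * "init_heap_size=8388608": parse_params() matches the token against
+ * the __param table and calls param_set_ulong() on the value.
+ */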
+
+/**
+ * Manually sets the specified parameter.
+ */
+int param_set_by_name_int(char *param, int val)
+{
+ struct kernel_param * params = __start___param;
+ unsigned int num_params = __stop___param - __start___param;
+ char valstr[128];
+
+ sprintf(valstr, "%d", val);
+ return parse_one(param, valstr, params, num_params, NULL);
+}
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/console.h>
+#include <lwk/smp.h>
+
+/**
+ * Prints a message to the console.
+ */
+int printk(const char *fmt, ...)
+{
+ va_list args;
+ int len;
+ char buf[1024];
+ char *p = buf;
+ int remain = sizeof(buf);
+
+ /* Start with a NULL terminated string */
+ *p = '\0';
+
+ /* Tack on the logical CPU ID */
+ len = sprintf(p, "[%u]:", this_cpu);
+ p += len;
+ remain -= len;
+
+ /* Construct the string... */
+ va_start(args, fmt);
+ len = vscnprintf(p, remain, fmt, args);
+ va_end(args);
+
+ /* Pass the string to the console subsystem */
+ console_write(buf);
+
+ /* Return number of characters printed */
+ return len;
+}
+
--- /dev/null
+/*
+ * linux/kernel/resource.c
+ *
+ * Copyright (C) 1999 Linus Torvalds
+ * Copyright (C) 1999 Martin Mares <mj@ucw.cz>
+ *
+ * Arbitrary resource management.
+ */
+
+#include <lwk/errno.h>
+#include <lwk/resource.h>
+#include <lwk/init.h>
+#include <lwk/spinlock.h>
+#include <arch/io.h>
+
+struct resource ioport_resource = {
+ .name = "PCI IO",
+ .start = 0x0000,
+ .end = IO_SPACE_LIMIT,
+ .flags = IORESOURCE_IO,
+};
+
+struct resource iomem_resource = {
+ .name = "PCI mem",
+ .start = 0UL,
+ .end = ~0UL,
+ .flags = IORESOURCE_MEM,
+};
+
+static DEFINE_RWLOCK(resource_lock);
+
+/* Return the conflict entry if you can't request it */
+static struct resource * __request_resource(struct resource *root, struct resource *new)
+{
+ unsigned long start = new->start;
+ unsigned long end = new->end;
+ struct resource *tmp, **p;
+
+ if (end < start)
+ return root;
+ if (start < root->start)
+ return root;
+ if (end > root->end)
+ return root;
+ p = &root->child;
+ for (;;) {
+ tmp = *p;
+ if (!tmp || tmp->start > end) {
+ new->sibling = tmp;
+ *p = new;
+ new->parent = root;
+ return NULL;
+ }
+ p = &tmp->sibling;
+ if (tmp->end < start)
+ continue;
+ return tmp;
+ }
+}
+
+static int __release_resource(struct resource *old)
+{
+ struct resource *tmp, **p;
+
+ BUG_ON(old->child);
+
+ p = &old->parent->child;
+ for (;;) {
+ tmp = *p;
+ if (!tmp)
+ break;
+ if (tmp == old) {
+ *p = tmp->sibling;
+ old->parent = NULL;
+ return 0;
+ }
+ p = &tmp->sibling;
+ }
+ return -EINVAL;
+}
+
+int request_resource(struct resource *root, struct resource *new)
+{
+ struct resource *conflict;
+
+ write_lock(&resource_lock);
+ conflict = __request_resource(root, new);
+ write_unlock(&resource_lock);
+ return conflict ? -EBUSY : 0;
+}
+
+struct resource *____request_resource(struct resource *root, struct resource *new)
+{
+ struct resource *conflict;
+
+ write_lock(&resource_lock);
+ conflict = __request_resource(root, new);
+ write_unlock(&resource_lock);
+ return conflict;
+}
+
+int release_resource(struct resource *old)
+{
+ int retval;
+
+ write_lock(&resource_lock);
+ retval = __release_resource(old);
+ write_unlock(&resource_lock);
+ return retval;
+}
+
+/*
+ * Find empty slot in the resource tree given range and alignment.
+ */
+static int find_resource(struct resource *root, struct resource *new,
+ unsigned long size,
+ unsigned long min, unsigned long max,
+ unsigned long align,
+ void (*alignf)(void *, struct resource *,
+ unsigned long, unsigned long),
+ void *alignf_data)
+{
+ struct resource *this = root->child;
+
+ new->start = root->start;
+ /*
+ * Skip past an allocated resource that starts at 0, since the assignment
+ * of this->start - 1 to new->end below would cause an underflow.
+ */
+ if (this && this->start == 0) {
+ new->start = this->end + 1;
+ this = this->sibling;
+ }
+ for(;;) {
+ if (this)
+ new->end = this->start - 1;
+ else
+ new->end = root->end;
+ if (new->start < min)
+ new->start = min;
+ if (new->end > max)
+ new->end = max;
+ new->start = ALIGN(new->start, align);
+ if (alignf)
+ alignf(alignf_data, new, size, align);
+ if (new->start < new->end && new->end - new->start >= size - 1) {
+ new->end = new->start + size - 1;
+ return 0;
+ }
+ if (!this)
+ break;
+ new->start = this->end + 1;
+ this = this->sibling;
+ }
+ return -EBUSY;
+}
+
+/*
+ * Allocate empty slot in the resource tree given range and alignment.
+ */
+int allocate_resource(struct resource *root, struct resource *new,
+ unsigned long size,
+ unsigned long min, unsigned long max,
+ unsigned long align,
+ void (*alignf)(void *, struct resource *,
+ unsigned long, unsigned long),
+ void *alignf_data)
+{
+ int err;
+
+ write_lock(&resource_lock);
+ err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
+ if (err >= 0 && __request_resource(root, new))
+ err = -EBUSY;
+ write_unlock(&resource_lock);
+ return err;
+}
+
+/**
+ * insert_resource - Inserts a resource in the resource tree
+ * @parent: parent of the new resource
+ * @new: new resource to insert
+ *
+ * Returns 0 on success, -EBUSY if the resource can't be inserted.
+ *
+ * This function is equivalent to request_resource when no conflict
+ * happens. If a conflict happens, and the conflicting resources
+ * entirely fit within the range of the new resource, then the new
+ * resource is inserted and the conflicting resources become children of
+ * the new resource.
+ */
+int insert_resource(struct resource *parent, struct resource *new)
+{
+ int result;
+ struct resource *first, *next;
+
+ write_lock(&resource_lock);
+
+ for (;; parent = first) {
+ result = 0;
+ first = __request_resource(parent, new);
+ if (!first)
+ goto out;
+
+ result = -EBUSY;
+ if (first == parent)
+ goto out;
+
+ if ((first->start > new->start) || (first->end < new->end))
+ break;
+ if ((first->start == new->start) && (first->end == new->end))
+ break;
+ }
+
+ for (next = first; ; next = next->sibling) {
+ /* Partial overlap? Bad, and unfixable */
+ if (next->start < new->start || next->end > new->end)
+ goto out;
+ if (!next->sibling)
+ break;
+ if (next->sibling->start > new->end)
+ break;
+ }
+
+ result = 0;
+
+ new->parent = parent;
+ new->sibling = next->sibling;
+ new->child = first;
+
+ next->sibling = NULL;
+ for (next = first; next; next = next->sibling)
+ next->parent = new;
+
+ if (parent->child == first) {
+ parent->child = new;
+ } else {
+ next = parent->child;
+ while (next->sibling != first)
+ next = next->sibling;
+ next->sibling = new;
+ }
+
+ out:
+ write_unlock(&resource_lock);
+ return result;
+}
+
+/*
+ * Given an existing resource, change its start and size to match the
+ * arguments. Returns -EBUSY if it can't fit. Existing children of
+ * the resource are assumed to be immutable.
+ */
+int adjust_resource(struct resource *res, unsigned long start, unsigned long size)
+{
+ struct resource *tmp, *parent = res->parent;
+ unsigned long end = start + size - 1;
+ int result = -EBUSY;
+
+ write_lock(&resource_lock);
+
+ if ((start < parent->start) || (end > parent->end))
+ goto out;
+
+ for (tmp = res->child; tmp; tmp = tmp->sibling) {
+ if ((tmp->start < start) || (tmp->end > end))
+ goto out;
+ }
+
+ if (res->sibling && (res->sibling->start <= end))
+ goto out;
+
+ tmp = parent->child;
+ if (tmp != res) {
+ while (tmp->sibling != res)
+ tmp = tmp->sibling;
+ if (start <= tmp->end)
+ goto out;
+ }
+
+ res->start = start;
+ res->end = end;
+ result = 0;
+
+ out:
+ write_unlock(&resource_lock);
+ return result;
+}
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/spinlock.h>
+#include <lwk/percpu.h>
+#include <lwk/aspace.h>
+#include <lwk/sched.h>
+#include <lwk/xcall.h>
+
+struct run_queue {
+ spinlock_t lock;
+ size_t num_tasks;
+ struct list_head task_list;
+ struct task_struct * idle_task;
+};
+
+static DEFINE_PER_CPU(struct run_queue, run_queue);
+
+static void
+idle_task_loop(void) {
+ while (1) {
+ arch_idle_task_loop_body();
+ schedule();
+ }
+}
+
+int __init
+sched_subsys_init(void)
+{
+ id_t cpu_id;
+ struct run_queue *runq;
+ struct task_struct *idle_task;
+ start_state_t start_state;
+ int status;
+
+ /* Reserve the idle tasks' ID. All idle tasks share the same ID. */
+ status = __task_reserve_id(IDLE_TASK_ID);
+ if (status)
+ panic("Failed to reserve IDLE_TASK_ID (status=%d).", status);
+
+ /* Initialize each CPU's run queue */
+ for_each_cpu_mask(cpu_id, cpu_present_map) {
+ runq = &per_cpu(run_queue, cpu_id);
+
+ spin_lock_init(&runq->lock);
+ runq->num_tasks = 0;
+ list_head_init(&runq->task_list);
+
+ /*
+ * Create this CPU's idle task. When a CPU has no
+ * other work to do, it runs the idle task.
+ */
+ start_state.uid = 0;
+ start_state.gid = 0;
+ start_state.aspace_id = KERNEL_ASPACE_ID;
+ start_state.entry_point = (vaddr_t)idle_task_loop;
+ start_state.stack_ptr = 0; /* will be set automatically */
+ start_state.cpu_id = cpu_id;
+ start_state.cpumask = NULL;
+
+ status = __task_create(IDLE_TASK_ID, "idle_task", &start_state,
+ &idle_task);
+ if (status)
+ panic("Failed to create idle_task (status=%d).",status);
+
+ runq->idle_task = idle_task;
+ }
+
+ return 0;
+}
+
+void
+sched_add_task(struct task_struct *task)
+{
+ id_t cpu = task->cpu_id;
+ struct run_queue *runq;
+ unsigned long irqstate;
+
+ runq = &per_cpu(run_queue, cpu);
+ spin_lock_irqsave(&runq->lock, irqstate);
+ list_add_tail(&task->sched_link, &runq->task_list);
+ ++runq->num_tasks;
+ spin_unlock_irqrestore(&runq->lock, irqstate);
+
+ if (cpu != this_cpu)
+ xcall_reschedule(cpu);
+}
+
+void
+sched_del_task(struct task_struct *task)
+{
+ struct run_queue *runq;
+ unsigned long irqstate;
+
+ runq = &per_cpu(run_queue, task->cpu_id);
+ spin_lock_irqsave(&runq->lock, irqstate);
+ list_del(&task->sched_link);
+ --runq->num_tasks;
+ spin_unlock_irqrestore(&runq->lock, irqstate);
+}
+
+int
+sched_wakeup_task(struct task_struct *task, taskstate_t valid_states)
+{
+ id_t cpu;
+ struct run_queue *runq;
+ int status;
+ unsigned long irqstate;
+
+ /* Protect against the task being migrated to a different CPU */
+repeat_lock_runq:
+ cpu = task->cpu_id;
+ runq = &per_cpu(run_queue, cpu);
+ spin_lock_irqsave(&runq->lock, irqstate);
+ if (cpu != task->cpu_id) {
+ spin_unlock_irqrestore(&runq->lock, irqstate);
+ goto repeat_lock_runq;
+ }
+ if (task->state & valid_states) {
+ set_mb(task->state, TASKSTATE_READY);
+ status = 0;
+ } else {
+ status = -EINVAL;
+ }
+ spin_unlock_irqrestore(&runq->lock, irqstate);
+
+ if (!status && (cpu != this_cpu))
+ xcall_reschedule(cpu);
+
+ return status;
+}
+
+static void
+context_switch(struct task_struct *prev, struct task_struct *next)
+{
+ /* Switch to the next task's address space */
+ if (prev->aspace != next->aspace)
+ arch_aspace_activate(next->aspace);
+
+ /**
+ * Switch to the next task's register state and kernel stack.
+ * There are three tasks involved in a context switch:
+ * 1. The previous task
+ * 2. The next task
+ * 3. The task that was running when next was suspended
+ * arch_context_switch() returns task 1 (the previous task) so that
+ * we can maintain the correct value of prev. Otherwise, the restored
+ * register state of next would leave prev pointing at task 3, which
+ * we don't care about (it may have moved CPUs, been destroyed, etc.).
+ */
+ prev = arch_context_switch(prev, next);
+
+ /* Prevent compiler from optimizing beyond this point */
+ barrier();
+}
+
+void
+schedule(void)
+{
+ struct run_queue *runq = &per_cpu(run_queue, this_cpu);
+ struct task_struct *prev = current, *next = NULL, *task;
+
+ spin_lock_irq(&runq->lock);
+
+ /* Move the currently running task to the end of the run queue */
+ if (!list_empty(&prev->sched_link)) {
+ list_del(&prev->sched_link);
+ /* If the task has exited, don't re-link it */
+ if (prev->state != TASKSTATE_EXIT_ZOMBIE)
+ list_add_tail(&prev->sched_link, &runq->task_list);
+ }
+
+ /* Look for a ready to execute task */
+ list_for_each_entry(task, &runq->task_list, sched_link) {
+ if (task->state == TASKSTATE_READY) {
+ next = task;
+ break;
+ }
+ }
+
+ /* If no tasks are ready to run, run the idle task */
+ if (next == NULL)
+ next = runq->idle_task;
+
+ if (prev != next) {
+ context_switch(prev, next);
+ /* Control returns here once this task is running again. It may
+ * have been migrated to a different CPU while it was switched out,
+ * so refresh the local run queue pointer. */
+ runq = &per_cpu(run_queue, this_cpu);
+ }
+
+ spin_unlock_irq(&runq->lock);
+}
+
+void
+schedule_new_task_tail(void)
+{
+ struct run_queue *runq = &per_cpu(run_queue, this_cpu);
+ BUG_ON(irqs_enabled());
+ spin_unlock(&runq->lock); /* keep IRQs disabled, arch code will
+ * re-enable IRQs as part of starting
+ * the new task */
+}
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/show.h>
+
+/**
+ * Prints the contents of memory in hex to the console.
+ * The region printed starts at vaddr and extends n unsigned longs.
+ */
+void
+show_memory(unsigned long vaddr, size_t n)
+{
+ size_t i;
+
+ for (i = 0; i < n; i++) {
+ printk(KERN_DEBUG "0x%016lx: 0x%08x_%08x\n",
+ vaddr,
+ *((unsigned int *)(vaddr+4)),
+ *((unsigned int *)(vaddr))
+ );
+ vaddr += sizeof(unsigned long);
+ }
+}
+
--- /dev/null
+/*
+ * Copyright (2004) Linus Torvalds
+ *
+ * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
+ *
+ * Copyright (2004, 2005) Ingo Molnar
+ *
+ * This file contains the spinlock/rwlock implementations for the
+ * SMP and the DEBUG_SPINLOCK cases.
+ */
+
+#include <lwk/linkage.h>
+#include <lwk/spinlock.h>
+//#include <lwk/interrupt.h>
+#include <lwk/linux_compat.h>
+
+/*
+ * Generic declaration of the raw read_trylock() function,
+ * architectures are supposed to optimize this:
+ */
+int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock)
+{
+ __raw_read_lock(lock);
+ return 1;
+}
+EXPORT_SYMBOL(generic__raw_read_trylock);
+
+int __lockfunc _spin_trylock(spinlock_t *lock)
+{
+ if (_raw_spin_trylock(lock))
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(_spin_trylock);
+
+int __lockfunc _read_trylock(rwlock_t *lock)
+{
+ if (_raw_read_trylock(lock))
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(_read_trylock);
+
+int __lockfunc _write_trylock(rwlock_t *lock)
+{
+ if (_raw_write_trylock(lock))
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(_write_trylock);
+
+void __lockfunc _read_lock(rwlock_t *lock)
+{
+ _raw_read_lock(lock);
+}
+EXPORT_SYMBOL(_read_lock);
+
+unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ _raw_spin_lock_flags(lock, &flags);
+ return flags;
+}
+EXPORT_SYMBOL(_spin_lock_irqsave);
+
+void __lockfunc _spin_lock_irq(spinlock_t *lock)
+{
+ local_irq_disable();
+ _raw_spin_lock(lock);
+}
+EXPORT_SYMBOL(_spin_lock_irq);
+
+#if 0
+void __lockfunc _spin_lock_bh(spinlock_t *lock)
+{
+ local_bh_disable();
+ _raw_spin_lock(lock);
+}
+EXPORT_SYMBOL(_spin_lock_bh);
+#endif
+
+unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ _raw_read_lock(lock);
+ return flags;
+}
+EXPORT_SYMBOL(_read_lock_irqsave);
+
+void __lockfunc _read_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ _raw_read_lock(lock);
+}
+EXPORT_SYMBOL(_read_lock_irq);
+
+#if 0
+void __lockfunc _read_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ _raw_read_lock(lock);
+}
+EXPORT_SYMBOL(_read_lock_bh);
+#endif
+
+unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ _raw_write_lock(lock);
+ return flags;
+}
+EXPORT_SYMBOL(_write_lock_irqsave);
+
+void __lockfunc _write_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ _raw_write_lock(lock);
+}
+EXPORT_SYMBOL(_write_lock_irq);
+
+#if 0
+void __lockfunc _write_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ _raw_write_lock(lock);
+}
+EXPORT_SYMBOL(_write_lock_bh);
+#endif
+
+void __lockfunc _spin_lock(spinlock_t *lock)
+{
+ _raw_spin_lock(lock);
+}
+
+EXPORT_SYMBOL(_spin_lock);
+
+void __lockfunc _write_lock(rwlock_t *lock)
+{
+ _raw_write_lock(lock);
+}
+
+EXPORT_SYMBOL(_write_lock);
+
+void __lockfunc _spin_unlock(spinlock_t *lock)
+{
+ _raw_spin_unlock(lock);
+}
+EXPORT_SYMBOL(_spin_unlock);
+
+void __lockfunc _write_unlock(rwlock_t *lock)
+{
+ _raw_write_unlock(lock);
+}
+EXPORT_SYMBOL(_write_unlock);
+
+void __lockfunc _read_unlock(rwlock_t *lock)
+{
+ _raw_read_unlock(lock);
+}
+EXPORT_SYMBOL(_read_unlock);
+
+void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+ _raw_spin_unlock(lock);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(_spin_unlock_irqrestore);
+
+void __lockfunc _spin_unlock_irq(spinlock_t *lock)
+{
+ _raw_spin_unlock(lock);
+ local_irq_enable();
+}
+EXPORT_SYMBOL(_spin_unlock_irq);
+
+#if 0
+void __lockfunc _spin_unlock_bh(spinlock_t *lock)
+{
+ _raw_spin_unlock(lock);
+ local_bh_enable();
+}
+EXPORT_SYMBOL(_spin_unlock_bh);
+#endif
+
+void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ _raw_read_unlock(lock);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(_read_unlock_irqrestore);
+
+void __lockfunc _read_unlock_irq(rwlock_t *lock)
+{
+ _raw_read_unlock(lock);
+ local_irq_enable();
+}
+EXPORT_SYMBOL(_read_unlock_irq);
+
+#if 0
+void __lockfunc _read_unlock_bh(rwlock_t *lock)
+{
+ _raw_read_unlock(lock);
+ local_bh_enable();
+}
+EXPORT_SYMBOL(_read_unlock_bh);
+#endif
+
+void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ _raw_write_unlock(lock);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(_write_unlock_irqrestore);
+
+void __lockfunc _write_unlock_irq(rwlock_t *lock)
+{
+ _raw_write_unlock(lock);
+ local_irq_enable();
+}
+EXPORT_SYMBOL(_write_unlock_irq);
+
+#if 0
+void __lockfunc _write_unlock_bh(rwlock_t *lock)
+{
+ _raw_write_unlock(lock);
+ local_bh_enable();
+}
+EXPORT_SYMBOL(_write_unlock_bh);
+#endif
+
+#if 0
+int __lockfunc _spin_trylock_bh(spinlock_t *lock)
+{
+ local_bh_disable();
+ if (_raw_spin_trylock(lock))
+ return 1;
+ local_bh_enable();
+ return 0;
+}
+EXPORT_SYMBOL(_spin_trylock_bh);
+#endif
+
+int in_lock_functions(unsigned long addr)
+{
+ /* Linker adds these: start and end of __lockfunc functions */
+ extern char __lock_text_start[], __lock_text_end[];
+
+ return addr >= (unsigned long)__lock_text_start
+ && addr < (unsigned long)__lock_text_end;
+}
+EXPORT_SYMBOL(in_lock_functions);
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/xcall.h>
+#include <lwk/htable.h>
+#include <lwk/aspace.h>
+#include <lwk/task.h>
+#include <lwk/sched.h>
+#include <arch/uaccess.h>
+
+/**
+ * ID space used to allocate task IDs.
+ */
+static idspace_t idspace;
+
+/**
+ * Hash table used to lookup task structures by ID.
+ */
+static htable_t htable;
+
+/**
+ * Lock for serializing access to the htable.
+ */
+static DEFINE_SPINLOCK(htable_lock);
+
+int __init
+task_subsys_init(void)
+{
+ if (idspace_create(__TASK_MIN_ID, __TASK_MAX_ID, &idspace))
+ panic("Failed to create task ID space.");
+
+ if (htable_create(7 /* 2^7 bins */,
+ offsetof(struct task_struct, id),
+ offsetof(struct task_struct, ht_link),
+ &htable))
+ panic("Failed to create task hash table.");
+
+ return 0;
+}
+
+int
+task_get_myid(id_t *id)
+{
+ *id = current->id;
+ return 0;
+}
+
+int
+sys_task_get_myid(id_t __user *id)
+{
+ int status;
+ id_t _id;
+
+ if ((status = task_get_myid(&_id)) != 0)
+ return status;
+
+ if (id && copy_to_user(id, &_id, sizeof(*id)))
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+__task_reserve_id(id_t id)
+{
+ return idspace_alloc_id(idspace, id, NULL);
+}
+
+int
+__task_create(id_t id, const char *name,
+ const start_state_t *start_state,
+ struct task_struct **task)
+{
+ int status;
+ union task_union *task_union;
+ struct task_struct *tsk;
+
+ if ((task_union = kmem_get_pages(TASK_ORDER)) == NULL)
+ return -ENOMEM;
+
+ tsk = &task_union->task_info;
+
+ /*
+ * Initialize the new task. kmem_alloc() allocates zeroed memory
+ * so fields with an initial state of zero do not need to be explicitly
+ * initialized.
+ */
+ tsk->id = id;
+ if (name)
+ strlcpy(tsk->name, name, sizeof(tsk->name));
+ hlist_node_init(&tsk->ht_link);
+ tsk->state = TASKSTATE_READY;
+ tsk->uid = start_state->uid;
+ tsk->gid = start_state->gid;
+ tsk->aspace = aspace_acquire(start_state->aspace_id);
+ if (!tsk->aspace) {
+ status = -ENOENT;
+ goto error1;
+ }
+ tsk->sighand = NULL;
+ if (start_state->cpumask) {
+ cpumask_user2kernel(start_state->cpumask, &tsk->cpumask);
+ if (!cpus_subset(tsk->cpumask, current->cpumask)) {
+ status = -EINVAL;
+ goto error2;
+ }
+ } else {
+ tsk->cpumask = current->cpumask;
+ }
+ if ((start_state->cpu_id >= NR_CPUS)
+ || !cpu_isset(start_state->cpu_id, tsk->cpumask)) {
+ status = -EINVAL;
+ goto error2;
+ }
+ tsk->cpu_id = start_state->cpu_id;
+ list_head_init(&tsk->sched_link);
+ tsk->ptrace = 0;
+ tsk->flags = 0;
+ tsk->exit_status = 0;
+
+ /* Do architecture-specific initialization */
+ if ((status = arch_task_create(tsk, start_state)) != 0)
+ goto error2;
+
+ if (task)
+ *task = tsk;
+ return 0;
+
+error2:
+ if (tsk->aspace)
+ aspace_release(tsk->aspace);
+error1:
+ kmem_free_pages(task_union, TASK_ORDER);
+ return status;
+}
+
+int
+task_create(id_t id_request, const char *name,
+ const start_state_t *start_state, id_t *id)
+{
+ id_t new_id;
+ struct task_struct *new_task;
+ int status;
+ unsigned long irqstate;
+
+ /* Allocate an ID for the new task */
+ if ((status = idspace_alloc_id(idspace, id_request, &new_id)) != 0)
+ return status;
+
+ /* Create and initialize a new task */
+ if ((status = __task_create(new_id, name, start_state, &new_task))) {
+ idspace_free_id(idspace, new_id);
+ return status;
+ }
+
+ /* Add new task to a hash table, for quick lookups by ID */
+ spin_lock_irqsave(&htable_lock, irqstate);
+ BUG_ON(htable_add(htable, new_task));
+ spin_unlock_irqrestore(&htable_lock, irqstate);
+
+ /* Add the new task to the target CPU's run queue */
+ sched_add_task(new_task);
+
+ if (id)
+ *id = new_task->id;
+ return 0;
+}
+
+int
+sys_task_create(id_t id_request, const char __user *name,
+ const start_state_t __user *start_state, id_t __user *id)
+{
+ int status;
+ start_state_t _start_state;
+ user_cpumask_t _cpumask;
+ char _name[16];
+ id_t _id;
+
+ if (current->uid != 0)
+ return -EPERM;
+
+ if (copy_from_user(&_start_state, start_state, sizeof(_start_state)))
+ return -EINVAL;
+
+ if (_start_state.aspace_id == KERNEL_ASPACE_ID)
+ return -EINVAL;
+
+ if (_start_state.cpumask) {
+ if (copy_from_user(&_cpumask, _start_state.cpumask, sizeof(_cpumask)))
+ return -EINVAL;
+ _start_state.cpumask = &_cpumask;
+ }
+
+ if (name && (strncpy_from_user(_name, name, sizeof(_name)) < 0))
+ return -EFAULT;
+ _name[sizeof(_name) - 1] = '\0';
+
+ if ((status = task_create(id_request, _name, &_start_state, &_id)) != 0)
+ return status;
+
+ if (id && copy_to_user(id, &_id, sizeof(*id)))
+ return -EFAULT;
+
+ return 0;
+}
+
+int
+task_exit(int status)
+{
+ /* Mark the task as exited...
+ * schedule() will remove it from the run queue */
+ current->exit_status = status;
+ current->state = TASKSTATE_EXIT_ZOMBIE;
+ schedule(); /* task is dead, so this should never return */
+ BUG();
+ while (1) {}
+}
+
+int
+sys_task_exit(int status)
+{
+ return task_exit(status);
+}
+
+int
+task_yield(void)
+{
+ /*
+ * Nothing to do; schedule() is called automatically
+ * before returning to user-space.
+ */
+ return 0;
+}
+
+int
+sys_task_yield(void)
+{
+ return task_yield();
+}
--- /dev/null
+#include <lwk/time.h>
+#include <arch/div64.h>
+
+static uint64_t shift;
+static uint64_t mult;
+static uint64_t offset;
+
+/**
+ * Converts the input khz cycle counter frequency to a time source multiplier.
+ * The multiplier is used to convert cycle counts to nanoseconds.
+ */
+void
+init_cycles2ns(uint32_t khz)
+{
+ /*
+ * Shift is used to obtain greater precision.
+ * Linux uses 22 for the x86 time stamp counter.
+ * For now we assume this will work for most cases.
+ */
+ shift = 22;
+
+ /*
+ * khz = cyc/(Million ns)
+ * mult/2^shift = ns/cyc
+ * mult = ns/cyc * 2^shift
+ * mult = 1Million/khz * 2^shift
+ * mult = 1000000 * 2^shift / khz
+ * mult = (1000000<<shift) / khz
+ */
+ mult = ((u64)1000000) << shift;
+ mult += khz/2; /* round for do_div */
+ do_div(mult, khz);
+}
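+
+/*
+ * Worked example (the 2 GHz frequency is hypothetical): for khz = 2000000
+ * and shift = 22, mult = (1000000 << 22) / 2000000 = 2097152 = 2^21, so
+ * cycles2ns(c) = (c * 2^21) >> 22 = c / 2, i.e. 0.5 ns per cycle, as
+ * expected for a 2 GHz cycle counter.
+ */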
+
+/**
+ * Converts cycles to nanoseconds.
+ * init_cycles2ns() must be called before this will work properly.
+ */
+uint64_t
+cycles2ns(uint64_t cycles)
+{
+ return (cycles * mult) >> shift;
+}
+
+/**
+ * Returns the current time in nanoseconds.
+ */
+uint64_t
+get_time(void)
+{
+ return cycles2ns(get_cycles()) + offset;
+}
+
+/**
+ * Sets the current time in nanoseconds.
+ */
+void
+set_time(uint64_t ns)
+{
+ offset = ns - cycles2ns(get_cycles());
+}
+
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/spinlock.h>
+#include <lwk/percpu.h>
+#include <lwk/time.h>
+#include <lwk/timer.h>
+#include <lwk/sched.h>
+
+struct timer_queue {
+ spinlock_t lock;
+ struct list_head timer_list;
+};
+
+static DEFINE_PER_CPU(struct timer_queue, timer_queue);
+
+int
+timer_subsys_init(void)
+{
+ id_t cpu;
+ struct timer_queue *timerq;
+
+ for_each_cpu_mask(cpu, cpu_present_map) {
+ timerq = &per_cpu(timer_queue, cpu);
+ spin_lock_init(&timerq->lock);
+ list_head_init(&timerq->timer_list);
+ }
+
+ return 0;
+}
+
+void
+timer_add(struct timer *timer)
+{
+ struct timer_queue *timerq;
+ struct list_head *pos;
+ unsigned long irqstate;
+
+ timerq = &per_cpu(timer_queue, this_cpu);
+ spin_lock_irqsave(&timerq->lock, irqstate);
+
+ /* Initialize fields we don't expect the caller to set */
+ list_head_init(&timer->link);
+ timer->cpu = this_cpu;
+
+ /* Insert the new timer into the CPU's sorted timer_list */
+ list_for_each(pos, &timerq->timer_list) {
+ struct timer *cur = list_entry(pos, struct timer, link);
+ if (cur->expires > timer->expires)
+ break;
+ }
+ list_add_tail(&timer->link, pos);
+
+ spin_unlock_irqrestore(&timerq->lock, irqstate);
+}
+
+void
+timer_del(struct timer *timer)
+{
+ struct timer_queue *timerq;
+ unsigned long irqstate;
+
+ timerq = &per_cpu(timer_queue, timer->cpu);
+ spin_lock_irqsave(&timerq->lock, irqstate);
+
+ /* Remove the timer, if it hasn't already expired */
+ if (!list_empty(&timer->link))
+ list_del(&timer->link);
+
+ spin_unlock_irqrestore(&timerq->lock, irqstate);
+}
+
+static void
+wakeup_task(uintptr_t task)
+{
+ sched_wakeup_task((struct task_struct *)task, TASKSTATE_INTERRUPTIBLE);
+}
+
+/* Returns the time remaining */
+uint64_t
+timer_sleep_until(uint64_t when)
+{
+ struct timer timer;
+ uint64_t now;
+
+ timer.expires = when;
+ timer.function = &wakeup_task;
+ timer.data = (uintptr_t)current;
+ timer_add(&timer);
+
+ /* Go to sleep */
+ set_mb(current->state, TASKSTATE_INTERRUPTIBLE);
+ schedule();
+
+ /* Return the time remaining */
+ now = get_time();
+ return (when > now) ? (when - now) : 0;
+}
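+
+/*
+ * Illustrative usage (sketch only; the 10 ms interval is arbitrary):
+ *
+ *	uint64_t remain = timer_sleep_until(get_time() + 10000000);
+ *
+ * remain is non-zero only if the task was woken before the timer expired.
+ */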
+
+void
+expire_timers(void)
+{
+ struct timer_queue *timerq = &per_cpu(timer_queue, this_cpu);
+ struct timer *timer;
+ uint64_t now = get_time();
+ unsigned long irqstate;
+
+ do {
+ /* Pop the head entry off of the timer list */
+ spin_lock_irqsave(&timerq->lock, irqstate);
+ if (!list_empty(&timerq->timer_list)) {
+ timer = list_entry(timerq->timer_list.next,
+ struct timer,
+ link);
+ if (timer->expires <= now) {
+ list_del_init(&timer->link);
+ } else {
+ timer = NULL;
+ }
+ } else {
+ timer = NULL;
+ }
+ spin_unlock_irqrestore(&timerq->lock, irqstate);
+
+ /* Execute the timer's callback function.
+ * Note that we have released the timerq->lock, so the
+ * callback function is free to call timer_add(). */
+ if (timer)
+ (*timer->function)(timer->data);
+ } while (timer);
+}
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/waitq.h>
+#include <lwk/sched.h>
+
+void
+waitq_init(waitq_t *waitq)
+{
+ spin_lock_init(&waitq->lock);
+ list_head_init(&waitq->waitq);
+}
+
+void
+waitq_init_entry(waitq_entry_t *entry, struct task_struct *task)
+{
+ entry->task = task;
+ list_head_init(&entry->link);
+}
+
+bool
+waitq_active(waitq_t *waitq)
+{
+ bool active;
+ unsigned long irqstate;
+
+ spin_lock_irqsave(&waitq->lock, irqstate);
+ active = !list_empty(&waitq->waitq);
+ spin_unlock_irqrestore(&waitq->lock, irqstate);
+
+ return active;
+}
+
+void
+waitq_add_entry(waitq_t *waitq, waitq_entry_t *entry)
+{
+ unsigned long irqstate;
+
+ spin_lock_irqsave(&waitq->lock, irqstate);
+ BUG_ON(!list_empty(&entry->link));
+ list_add(&entry->link, &waitq->waitq);
+ spin_unlock_irqrestore(&waitq->lock, irqstate);
+}
+
+void
+waitq_remove_entry(waitq_t *waitq, waitq_entry_t *entry)
+{
+ unsigned long irqstate;
+
+ spin_lock_irqsave(&waitq->lock, irqstate);
+ BUG_ON(list_empty(&entry->link));
+ list_del_init(&entry->link);
+ spin_unlock_irqrestore(&waitq->lock, irqstate);
+}
+
+void
+waitq_prepare_to_wait(waitq_t *waitq, waitq_entry_t *entry, taskstate_t state)
+{
+ unsigned long irqstate;
+
+ spin_lock_irqsave(&waitq->lock, irqstate);
+ if (list_empty(&entry->link))
+ list_add(&entry->link, &waitq->waitq);
+ set_mb(entry->task->state, state);
+ spin_unlock_irqrestore(&waitq->lock, irqstate);
+}
+
+void
+waitq_finish_wait(waitq_t *waitq, waitq_entry_t *entry)
+{
+ set_mb(entry->task->state, TASKSTATE_READY);
+ waitq_remove_entry(waitq, entry);
+}
+
+void
+waitq_wakeup(waitq_t *waitq)
+{
+ unsigned long irqstate;
+ struct list_head *tmp;
+ waitq_entry_t *entry;
+
+ spin_lock_irqsave(&waitq->lock, irqstate);
+ list_for_each(tmp, &waitq->waitq) {
+ entry = list_entry(tmp, waitq_entry_t, link);
+ sched_wakeup_task(
+ entry->task,
+ (TASKSTATE_UNINTERRUPTIBLE | TASKSTATE_INTERRUPTIBLE)
+ );
+ }
+ spin_unlock_irqrestore(&waitq->lock, irqstate);
+}
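+
+/*
+ * Illustrative wait pattern (sketch only; 'my_waitq' and 'condition' are
+ * hypothetical). A task typically waits for a condition like this:
+ *
+ *	waitq_entry_t entry;
+ *	waitq_init_entry(&entry, current);
+ *	while (!condition) {
+ *		waitq_prepare_to_wait(&my_waitq, &entry,
+ *		                      TASKSTATE_UNINTERRUPTIBLE);
+ *		if (condition)
+ *			break;
+ *		schedule();
+ *	}
+ *	waitq_finish_wait(&my_waitq, &entry);
+ *
+ * The waking side sets 'condition' and then calls waitq_wakeup(&my_waitq).
+ */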
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/smp.h>
+#include <lwk/xcall.h>
+
+/**
+ * Carries out an inter-CPU function call. The specified function is executed
+ * on all of the target CPUs that are currently online and executes in
+ * interrupt context with interrupts disabled... it must not block and should
+ * be short.
+ *
+ * Arguments:
+ * [IN] cpu_mask: The target CPUs of the cross-call.
+ * [IN] func: The function to execute on each target CPU.
+ * [IN] info: Argument to pass to func().
+ * [IN] wait: true = wait for cross-call to fully complete.
+ *
+ * Returns:
+ * Success: 0
+ * Failure: Error code
+ *
+ * NOTE: If wait=0, care must be taken to ensure that the data pointed to by
+ * the info argument remains valid until the cross-call function func()
+ * completes on all target CPUs.
+ */
+int
+xcall_function(
+ cpumask_t cpu_mask,
+ void (*func)(void *info),
+ void * info,
+ bool wait
+)
+{
+ bool contains_me;
+ int status;
+
+ BUG_ON(irqs_disabled());
+ BUG_ON(!func);
+
+ /* Only target online CPUs */
+ cpus_and(cpu_mask, cpu_mask, cpu_online_map);
+
+ /* No need to xcall ourself... we'll just call func() directly */
+ if ((contains_me = cpu_isset(this_cpu, cpu_mask)))
+ cpu_clear(this_cpu, cpu_mask);
+
+ /* Perform xcall to remote CPUs */
+ if ((status = arch_xcall_function(cpu_mask, func, info, wait)))
+ return status;
+
+ /* Call func() on the local CPU, if it was in cpu_mask */
+ if (contains_me)
+ (*func)(info);
+
+ return 0;
+}
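+
+/*
+ * Illustrative usage (sketch only; flush_local_tlb is a hypothetical
+ * callback). To run a short routine on every online CPU and wait for it
+ * to complete everywhere:
+ *
+ *	static void flush_local_tlb(void *info) { ... }
+ *
+ *	xcall_function(cpu_online_map, flush_local_tlb, NULL, true);
+ */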
+
+/**
+ * Sends a reschedule inter-processor interrupt to the target CPU.
+ * This causes the target CPU to call schedule().
+ *
+ * NOTE: It is safe to call this with locks held and interrupts
+ * disabled so long as the caller will drop the locks and
+ * re-enable interrupts "soon", independent of whether the
+ * target actually receives the reschedule interrupt.
+ * Deadlock may occur if these conditions aren't met.
+ */
+void
+xcall_reschedule(id_t cpu)
+{
+ arch_xcall_reschedule(cpu);
+}
--- /dev/null
+config DEBUG_KERNEL
+ bool "Kernel debugging"
+ help
+ Say Y here if you are developing drivers or trying to debug and
+ identify kernel problems.
+
+config LOG_BUF_SHIFT
+ int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL
+ range 12 21
+ default 15
+ help
+ Select kernel log buffer size as a power of 2.
+ Defaults and Examples:
+ 17 => 128 KB
+ 16 => 64 KB
+ 15 => 32 KB
+ 14 => 16 KB
+ 13 => 8 KB
+ 12 => 4 KB
+
+config DEBUG_MUTEXES
+ bool "Mutex debugging, deadlock detection"
+ default n
+ depends on DEBUG_KERNEL
+ help
+ This allows mutex semantics violations and mutex related deadlocks
+ (lockups) to be detected and reported automatically.
+
+config DEBUG_SPINLOCK
+ bool "Spinlock debugging"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here and build SMP to catch missing spinlock initialization
+ and certain other kinds of spinlock errors commonly made. This is
+ best used in conjunction with the NMI watchdog so that spinlock
+ deadlocks are also debuggable.
+
+config DEBUG_SPINLOCK_SLEEP
+ bool "Sleep-inside-spinlock checking"
+ depends on DEBUG_KERNEL
+ help
+ If you say Y here, various routines which may sleep will become very
+ noisy if they are called with a spinlock held.
+
+config DEBUG_BUGVERBOSE
+ bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED
+ depends on BUG
+ depends on ARM || ARM26 || M32R || M68K || SPARC32 || SPARC64 || X86_32 || FRV
+ default !EMBEDDED
+ help
+ Say Y here to make BUG() panics output the file name and line number
+ of the BUG call as well as the EIP and oops trace. This aids
+ debugging but costs about 70-100K of memory.
+
+config DEBUG_INFO
+ bool "Compile the kernel with debug info"
+ depends on DEBUG_KERNEL
+ help
+ If you say Y here the resulting kernel image will include
+ debugging info resulting in a larger kernel image.
+ Say Y here only if you plan to debug the kernel.
+
+ If unsure, say N.
+
+config FRAME_POINTER
+ bool "Compile the kernel with frame pointers"
+ depends on DEBUG_KERNEL
+ default y if DEBUG_INFO && UML
+ help
+ If you say Y here the resulting kernel image will be slightly larger
+ and slower, but it might give very useful debugging information on
+ some architectures or if you use external debuggers.
+ If you don't debug the kernel, you can say N.
+
+config UNWIND_INFO
+ bool "Compile the kernel with frame unwind information"
+ depends on !IA64
+ depends on !MODULES || !(MIPS || PARISC || PPC || SUPERH || V850)
+ help
+ If you say Y here the resulting kernel image will be slightly larger
+ but not slower, and it will give very useful debugging information.
+ If you don't debug the kernel, you can say N, but we may not be able
+ to solve problems without frame unwind information or frame pointers.
+
+config FORCED_INLINING
+ bool "Force gcc to inline functions marked 'inline'"
+ depends on DEBUG_KERNEL
+ default y
+ help
+ This option determines if the kernel forces gcc to inline the functions
+ developers have marked 'inline'. Doing so takes away freedom from gcc to
+ do what it thinks is best, which is desirable for the gcc 3.x series of
+ compilers. The gcc 4.x series have a rewritten inlining algorithm and
+ disabling this option will generate a smaller kernel there. Hopefully
+ this algorithm is good enough that allowing gcc 4 to make the decision
+ can become the default in the future; until then, this option is here
+ to evaluate gcc's inlining behavior.
+
--- /dev/null
+#
+# Makefile for some libs needed by the kernel.
+#
+
+lib-y := vsprintf.o string.o ctype.o cmdline.o cpumask.o bitmap.o hweight.o \
+ extable.o sort.o
--- /dev/null
+/*
+ * lib/bitmap.c
+ * Helper functions for bitmap.h.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+#include <lwk/kernel.h>
+#include <lwk/ctype.h>
+#include <lwk/errno.h>
+#include <lwk/bitmap.h>
+#include <lwk/bitops.h>
+#include <lwk/linux_compat.h>
+//#include <asm/uaccess.h>
+
+/*
+ * bitmaps provide an array of bits, implemented using an
+ * array of unsigned longs. The number of valid bits in a
+ * given bitmap does _not_ need to be an exact multiple of
+ * BITS_PER_LONG.
+ *
+ * The possible unused bits in the last, partially used word
+ * of a bitmap are 'don't care'. The implementation makes
+ * no particular effort to keep them zero. It ensures that
+ * their value will not affect the results of any operation.
+ * The bitmap operations that return Boolean (bitmap_empty,
+ * for example) or scalar (bitmap_weight, for example) results
+ * carefully filter out these unused bits from impacting their
+ * results.
+ *
+ * These operations actually hold to a slightly stronger rule:
+ * if you don't input any bitmaps to these ops that have some
+ * unused bits set, then they won't output any set unused bits
+ * in output bitmaps.
+ *
+ * The byte ordering of bitmaps is more natural on little
+ * endian architectures. See the big-endian headers
+ * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
+ * for the best explanations of this ordering.
+ */
+
+int __bitmap_empty(const unsigned long *bitmap, int bits)
+{
+ int k, lim = bits/BITS_PER_LONG;
+ for (k = 0; k < lim; ++k)
+ if (bitmap[k])
+ return 0;
+
+ if (bits % BITS_PER_LONG)
+ if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
+ return 0;
+
+ return 1;
+}
+EXPORT_SYMBOL(__bitmap_empty);
+
+int __bitmap_full(const unsigned long *bitmap, int bits)
+{
+ int k, lim = bits/BITS_PER_LONG;
+ for (k = 0; k < lim; ++k)
+ if (~bitmap[k])
+ return 0;
+
+ if (bits % BITS_PER_LONG)
+ if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
+ return 0;
+
+ return 1;
+}
+EXPORT_SYMBOL(__bitmap_full);
+
+int __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits)
+{
+ int k, lim = bits/BITS_PER_LONG;
+ for (k = 0; k < lim; ++k)
+ if (bitmap1[k] != bitmap2[k])
+ return 0;
+
+ if (bits % BITS_PER_LONG)
+ if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
+ return 0;
+
+ return 1;
+}
+EXPORT_SYMBOL(__bitmap_equal);
+
+void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits)
+{
+ int k, lim = bits/BITS_PER_LONG;
+ for (k = 0; k < lim; ++k)
+ dst[k] = ~src[k];
+
+ if (bits % BITS_PER_LONG)
+ dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits);
+}
+EXPORT_SYMBOL(__bitmap_complement);
+
+/*
+ * __bitmap_shift_right - logical right shift of the bits in a bitmap
+ * @dst - destination bitmap
+ * @src - source bitmap
+ * @nbits - shift by this many bits
+ * @bits - bitmap size, in bits
+ *
+ * Shifting right (dividing) means moving bits in the MS -> LS bit
+ * direction. Zeros are fed into the vacated MS positions and the
+ * LS bits shifted off the bottom are lost.
+ */
+void __bitmap_shift_right(unsigned long *dst,
+ const unsigned long *src, int shift, int bits)
+{
+ int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
+ int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
+ unsigned long mask = (1UL << left) - 1;
+ for (k = 0; off + k < lim; ++k) {
+ unsigned long upper, lower;
+
+ /*
+ * If shift is not word aligned, take lower rem bits of
+ * word above and make them the top rem bits of result.
+ */
+ if (!rem || off + k + 1 >= lim)
+ upper = 0;
+ else {
+ upper = src[off + k + 1];
+ if (off + k + 1 == lim - 1 && left)
+ upper &= mask;
+ }
+ lower = src[off + k];
+ if (left && off + k == lim - 1)
+ lower &= mask;
+ dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
+ if (left && k == lim - 1)
+ dst[k] &= mask;
+ }
+ if (off)
+ memset(&dst[lim - off], 0, off*sizeof(unsigned long));
+}
+EXPORT_SYMBOL(__bitmap_shift_right);
+
+
+/*
+ * __bitmap_shift_left - logical left shift of the bits in a bitmap
+ * @dst - destination bitmap
+ * @src - source bitmap
+ * @nbits - shift by this many bits
+ * @bits - bitmap size, in bits
+ *
+ * Shifting left (multiplying) means moving bits in the LS -> MS
+ * direction. Zeros are fed into the vacated LS bit positions
+ * and those MS bits shifted off the top are lost.
+ */
+
+void __bitmap_shift_left(unsigned long *dst,
+ const unsigned long *src, int shift, int bits)
+{
+ int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
+ int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
+ for (k = lim - off - 1; k >= 0; --k) {
+ unsigned long upper, lower;
+
+ /*
+ * If shift is not word aligned, take upper rem bits of
+ * word below and make them the bottom rem bits of result.
+ */
+ if (rem && k > 0)
+ lower = src[k - 1];
+ else
+ lower = 0;
+ upper = src[k];
+ if (left && k == lim - 1)
+ upper &= (1UL << left) - 1;
+ dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
+ if (left && k + off == lim - 1)
+ dst[k + off] &= (1UL << left) - 1;
+ }
+ if (off)
+ memset(dst, 0, off*sizeof(unsigned long));
+}
+EXPORT_SYMBOL(__bitmap_shift_left);
+
+void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits)
+{
+ int k;
+ int nr = BITS_TO_LONGS(bits);
+
+ for (k = 0; k < nr; k++)
+ dst[k] = bitmap1[k] & bitmap2[k];
+}
+EXPORT_SYMBOL(__bitmap_and);
+
+void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits)
+{
+ int k;
+ int nr = BITS_TO_LONGS(bits);
+
+ for (k = 0; k < nr; k++)
+ dst[k] = bitmap1[k] | bitmap2[k];
+}
+EXPORT_SYMBOL(__bitmap_or);
+
+void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits)
+{
+ int k;
+ int nr = BITS_TO_LONGS(bits);
+
+ for (k = 0; k < nr; k++)
+ dst[k] = bitmap1[k] ^ bitmap2[k];
+}
+EXPORT_SYMBOL(__bitmap_xor);
+
+void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits)
+{
+ int k;
+ int nr = BITS_TO_LONGS(bits);
+
+ for (k = 0; k < nr; k++)
+ dst[k] = bitmap1[k] & ~bitmap2[k];
+}
+EXPORT_SYMBOL(__bitmap_andnot);
+
+int __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits)
+{
+ int k, lim = bits/BITS_PER_LONG;
+ for (k = 0; k < lim; ++k)
+ if (bitmap1[k] & bitmap2[k])
+ return 1;
+
+ if (bits % BITS_PER_LONG)
+ if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(__bitmap_intersects);
+
+int __bitmap_subset(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits)
+{
+ int k, lim = bits/BITS_PER_LONG;
+ for (k = 0; k < lim; ++k)
+ if (bitmap1[k] & ~bitmap2[k])
+ return 0;
+
+ if (bits % BITS_PER_LONG)
+ if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
+ return 0;
+ return 1;
+}
+EXPORT_SYMBOL(__bitmap_subset);
+
+int __bitmap_weight(const unsigned long *bitmap, int bits)
+{
+ int k, w = 0, lim = bits/BITS_PER_LONG;
+
+ for (k = 0; k < lim; k++)
+ w += hweight_long(bitmap[k]);
+
+ if (bits % BITS_PER_LONG)
+ w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
+
+ return w;
+}
+EXPORT_SYMBOL(__bitmap_weight);
+
+/*
+ * Bitmap printing & parsing functions: first version by Bill Irwin,
+ * second version by Paul Jackson, third by Joe Korty.
+ */
+
+#define CHUNKSZ 32
+#define nbits_to_hold_value(val) fls(val)
+#define unhex(c) (isdigit(c) ? (c - '0') : (toupper(c) - 'A' + 10))
+#define BASEDEC 10 /* fancier cpuset lists input in decimal */
+
+/**
+ * bitmap_scnprintf - convert bitmap to an ASCII hex string.
+ * @buf: byte buffer into which string is placed
+ * @buflen: reserved size of @buf, in bytes
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ *
+ * Exactly @nmaskbits bits are displayed. Hex digits are grouped into
+ * comma-separated sets of eight digits per set.
+ */
+int bitmap_scnprintf(char *buf, unsigned int buflen,
+ const unsigned long *maskp, int nmaskbits)
+{
+ int i, word, bit, len = 0;
+ unsigned long val;
+ const char *sep = "";
+ int chunksz;
+ u32 chunkmask;
+
+ chunksz = nmaskbits & (CHUNKSZ - 1);
+ if (chunksz == 0)
+ chunksz = CHUNKSZ;
+
+ i = ALIGN(nmaskbits, CHUNKSZ) - CHUNKSZ;
+ for (; i >= 0; i -= CHUNKSZ) {
+ chunkmask = ((1ULL << chunksz) - 1);
+ word = i / BITS_PER_LONG;
+ bit = i % BITS_PER_LONG;
+ val = (maskp[word] >> bit) & chunkmask;
+ len += scnprintf(buf+len, buflen-len, "%s%0*lx", sep,
+ (chunksz+3)/4, val);
+ chunksz = CHUNKSZ;
+ sep = ",";
+ }
+ return len;
+}
+EXPORT_SYMBOL(bitmap_scnprintf);
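+
+/*
+ * Worked example (hypothetical values): for a 16-bit bitmap whose word
+ * holds 0x00ff, bitmap_scnprintf(buf, buflen, mask, 16) writes "00ff";
+ * for a 40-bit bitmap with only bit 0 set it writes "00,00000001",
+ * showing the comma-separated 32-bit grouping.
+ */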
+
+#if 0
+/**
+ * bitmap_parse - convert an ASCII hex string into a bitmap.
+ * @buf: pointer to buffer in user space containing string.
+ * @buflen: buffer size in bytes. If string is smaller than this
+ * then it must be terminated with a \0.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ *
+ * Commas group hex digits into chunks. Each chunk defines exactly 32
+ * bits of the resultant bitmask. No chunk may specify a value larger
+ * than 32 bits (-EOVERFLOW), and if a chunk specifies a smaller value
+ * then leading 0-bits are prepended. -EINVAL is returned for illegal
+ * characters and for grouping errors such as "1,,5", ",44", "," and "".
+ * Leading and trailing whitespace accepted, but not embedded whitespace.
+ */
+int bitmap_parse(const char __user *ubuf, unsigned int ubuflen,
+ unsigned long *maskp, int nmaskbits)
+{
+ int c, old_c, totaldigits, ndigits, nchunks, nbits;
+ u32 chunk;
+
+ bitmap_zero(maskp, nmaskbits);
+
+ nchunks = nbits = totaldigits = c = 0;
+ do {
+ chunk = ndigits = 0;
+
+ /* Get the next chunk of the bitmap */
+ while (ubuflen) {
+ old_c = c;
+ if (get_user(c, ubuf++))
+ return -EFAULT;
+ ubuflen--;
+ if (isspace(c))
+ continue;
+
+ /*
+ * If the last character was a space and the current
+ * character isn't '\0', we've got embedded whitespace.
+ * This is a no-no, so throw an error.
+ */
+ if (totaldigits && c && isspace(old_c))
+ return -EINVAL;
+
+ /* A '\0' or a ',' signal the end of the chunk */
+ if (c == '\0' || c == ',')
+ break;
+
+ if (!isxdigit(c))
+ return -EINVAL;
+
+ /*
+ * Make sure there are at least 4 free bits in 'chunk'.
+ * If not, this hexdigit will overflow 'chunk', so
+ * throw an error.
+ */
+ if (chunk & ~((1UL << (CHUNKSZ - 4)) - 1))
+ return -EOVERFLOW;
+
+ chunk = (chunk << 4) | unhex(c);
+ ndigits++; totaldigits++;
+ }
+ if (ndigits == 0)
+ return -EINVAL;
+ if (nchunks == 0 && chunk == 0)
+ continue;
+
+ __bitmap_shift_left(maskp, maskp, CHUNKSZ, nmaskbits);
+ *maskp |= chunk;
+ nchunks++;
+ nbits += (nchunks == 1) ? nbits_to_hold_value(chunk) : CHUNKSZ;
+ if (nbits > nmaskbits)
+ return -EOVERFLOW;
+ } while (ubuflen && c == ',');
+
+ return 0;
+}
+EXPORT_SYMBOL(bitmap_parse);
+#endif
+
+/*
+ * bscnl_emit(buf, buflen, rbot, rtop, bp)
+ *
+ * Helper routine for bitmap_scnlistprintf(). Write decimal number
+ * or range to buf, suppressing output past buf+buflen, with optional
+ * comma-prefix. Return len of what would be written to buf, if it
+ * all fit.
+ */
+static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len)
+{
+ if (len > 0)
+ len += scnprintf(buf + len, buflen - len, ",");
+ if (rbot == rtop)
+ len += scnprintf(buf + len, buflen - len, "%d", rbot);
+ else
+ len += scnprintf(buf + len, buflen - len, "%d-%d", rbot, rtop);
+ return len;
+}
+
+/**
+ * bitmap_scnlistprintf - convert bitmap to list format ASCII string
+ * @buf: byte buffer into which string is placed
+ * @buflen: reserved size of @buf, in bytes
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ *
+ * Output format is a comma-separated list of decimal numbers and
+ * ranges. Consecutively set bits are shown as two hyphen-separated
+ * decimal numbers, the smallest and largest bit numbers set in
+ * the range. Output format is compatible with the format
+ * accepted as input by bitmap_parselist().
+ *
+ * The return value is the number of characters which would be
+ * generated for the given input, excluding the trailing '\0', as
+ * per ISO C99.
+ */
+int bitmap_scnlistprintf(char *buf, unsigned int buflen,
+ const unsigned long *maskp, int nmaskbits)
+{
+ int len = 0;
+ /* current bit is 'cur', most recently seen range is [rbot, rtop] */
+ int cur, rbot, rtop;
+
+ rbot = cur = find_first_bit(maskp, nmaskbits);
+ while (cur < nmaskbits) {
+ rtop = cur;
+ cur = find_next_bit(maskp, nmaskbits, cur+1);
+ if (cur >= nmaskbits || cur > rtop + 1) {
+ len = bscnl_emit(buf, buflen, rbot, rtop, len);
+ rbot = cur;
+ }
+ }
+ return len;
+}
+EXPORT_SYMBOL(bitmap_scnlistprintf);
+
+/**
+ * bitmap_parselist - convert list format ASCII string to bitmap
+ * @buf: read nul-terminated user string from this buffer
+ * @mask: write resulting mask here
+ * @nmaskbits: number of bits in mask to be written
+ *
+ * Input format is a comma-separated list of decimal numbers and
+ * ranges. Consecutively set bits are shown as two hyphen-separated
+ * decimal numbers, the smallest and largest bit numbers set in
+ * the range.
+ *
+ * Returns 0 on success, -errno on invalid input strings:
+ * -EINVAL: second number in range smaller than first
+ * -EINVAL: invalid character in string
+ * -ERANGE: bit number specified too large for mask
+ */
+int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
+{
+ unsigned a, b;
+
+ bitmap_zero(maskp, nmaskbits);
+ do {
+ if (!isdigit(*bp))
+ return -EINVAL;
+ b = a = simple_strtoul(bp, (char **)&bp, BASEDEC);
+ if (*bp == '-') {
+ bp++;
+ if (!isdigit(*bp))
+ return -EINVAL;
+ b = simple_strtoul(bp, (char **)&bp, BASEDEC);
+ }
+ if (!(a <= b))
+ return -EINVAL;
+ if (b >= nmaskbits)
+ return -ERANGE;
+ while (a <= b) {
+ set_bit(a, maskp);
+ a++;
+ }
+ if (*bp == ',')
+ bp++;
+ } while (*bp != '\0' && *bp != '\n');
+ return 0;
+}
+EXPORT_SYMBOL(bitmap_parselist);
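+
+/*
+ * Worked example (hypothetical input): bitmap_parselist("0-3,8", mask, 16)
+ * sets bits 0 through 3 and bit 8, leaving mask[0] == 0x10f, and returns 0.
+ */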
+
+/*
+ * bitmap_pos_to_ord(buf, pos, bits)
+ * @buf: pointer to a bitmap
+ * @pos: a bit position in @buf (0 <= @pos < @bits)
+ * @bits: number of valid bit positions in @buf
+ *
+ * Map the bit at position @pos in @buf (of length @bits) to the
+ * ordinal of which set bit it is. If it is not set or if @pos
+ * is not a valid bit position, map to -1.
+ *
+ * If for example, just bits 4 through 7 are set in @buf, then @pos
+ * values 4 through 7 will get mapped to 0 through 3, respectively,
+ * and other @pos values will get mapped to -1. When @pos value 7
+ * gets mapped to (returns) @ord value 3 in this example, that means
+ * that bit 7 is the 3rd (starting with 0th) set bit in @buf.
+ *
+ * The bit positions 0 through @bits - 1 are valid positions in @buf.
+ */
+static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
+{
+ int i, ord;
+
+ if (pos < 0 || pos >= bits || !test_bit(pos, buf))
+ return -1;
+
+ i = find_first_bit(buf, bits);
+ ord = 0;
+ while (i < pos) {
+ i = find_next_bit(buf, bits, i + 1);
+ ord++;
+ }
+ BUG_ON(i != pos);
+
+ return ord;
+}
+
+/**
+ * bitmap_ord_to_pos(buf, ord, bits)
+ * @buf: pointer to bitmap
+ * @ord: ordinal bit position (n-th set bit, n >= 0)
+ * @bits: number of valid bit positions in @buf
+ *
+ * Map the ordinal offset of bit @ord in @buf to its position in @buf.
+ * Value of @ord should be in range 0 <= @ord < weight(buf), else
+ * results are undefined.
+ *
+ * If for example, just bits 4 through 7 are set in @buf, then @ord
+ * values 0 through 3 will get mapped to 4 through 7, respectively,
+ * and all other @ord values return undefined values. When @ord value 3
+ * gets mapped to (returns) @pos value 7 in this example, that means
+ * that the 3rd set bit (starting with 0th) is at position 7 in @buf.
+ *
+ * The bit positions 0 through @bits - 1 are valid positions in @buf.
+ */
+static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
+{
+ int pos = 0;
+
+ if (ord >= 0 && ord < bits) {
+ int i;
+
+ for (i = find_first_bit(buf, bits);
+ i < bits && ord > 0;
+ i = find_next_bit(buf, bits, i + 1))
+ ord--;
+ if (i < bits && ord == 0)
+ pos = i;
+ }
+
+ return pos;
+}
+
+/**
+ * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
+ * @dst: remapped result
+ * @src: subset to be remapped
+ * @old: defines domain of map
+ * @new: defines range of map
+ * @bits: number of bits in each of these bitmaps
+ *
+ * Let @old and @new define a mapping of bit positions, such that
+ * whatever position is held by the n-th set bit in @old is mapped
+ * to the n-th set bit in @new. In the more general case, allowing
+ * for the possibility that the weight 'w' of @new is less than the
+ * weight of @old, map the position of the n-th set bit in @old to
+ * the position of the m-th set bit in @new, where m == n % w.
+ *
+ * If either of the @old and @new bitmaps are empty, or if @src and
+ * @dst point to the same location, then this routine copies @src
+ * to @dst.
+ *
+ * The positions of unset bits in @old are mapped to themselves
+ * (the identity map).
+ *
+ * Apply the above specified mapping to @src, placing the result in
+ * @dst, clearing any bits previously set in @dst.
+ *
+ * For example, let's say that @old has bits 4 through 7 set, and
+ * @new has bits 12 through 15 set. This defines the mapping of bit
+ * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
+ * bit positions unchanged. So if say @src comes into this routine
+ * with bits 1, 5 and 7 set, then @dst should leave with bits 1,
+ * 13 and 15 set.
+ */
+void bitmap_remap(unsigned long *dst, const unsigned long *src,
+ const unsigned long *old, const unsigned long *new,
+ int bits)
+{
+ int oldbit, w;
+
+ if (dst == src) /* following doesn't handle inplace remaps */
+ return;
+ bitmap_zero(dst, bits);
+
+ w = bitmap_weight(new, bits);
+ for (oldbit = find_first_bit(src, bits);
+ oldbit < bits;
+ oldbit = find_next_bit(src, bits, oldbit + 1)) {
+ int n = bitmap_pos_to_ord(old, oldbit, bits);
+ if (n < 0 || w == 0)
+ set_bit(oldbit, dst); /* identity map */
+ else
+ set_bit(bitmap_ord_to_pos(new, n % w, bits), dst);
+ }
+}
+EXPORT_SYMBOL(bitmap_remap);
+
+/**
+ * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
+ * @oldbit - bit position to be mapped
+ * @old: defines domain of map
+ * @new: defines range of map
+ * @bits: number of bits in each of these bitmaps
+ *
+ * Let @old and @new define a mapping of bit positions, such that
+ * whatever position is held by the n-th set bit in @old is mapped
+ * to the n-th set bit in @new. In the more general case, allowing
+ * for the possibility that the weight 'w' of @new is less than the
+ * weight of @old, map the position of the n-th set bit in @old to
+ * the position of the m-th set bit in @new, where m == n % w.
+ *
+ * The positions of unset bits in @old are mapped to themselves
+ * (the identity map).
+ *
+ * Apply the above specified mapping to bit position @oldbit, returning
+ * the new bit position.
+ *
+ * For example, let's say that @old has bits 4 through 7 set, and
+ * @new has bits 12 through 15 set. This defines the mapping of bit
+ * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
+ * bit positions unchanged. So if say @oldbit is 5, then this routine
+ * returns 13.
+ */
+int bitmap_bitremap(int oldbit, const unsigned long *old,
+ const unsigned long *new, int bits)
+{
+ int w = bitmap_weight(new, bits);
+ int n = bitmap_pos_to_ord(old, oldbit, bits);
+ if (n < 0 || w == 0)
+ return oldbit;
+ else
+ return bitmap_ord_to_pos(new, n % w, bits);
+}
+EXPORT_SYMBOL(bitmap_bitremap);
+
+/*
+ * Common code for bitmap_*_region() routines.
+ * bitmap: array of unsigned longs corresponding to the bitmap
+ * pos: the beginning of the region
+ * order: region size (log base 2 of number of bits)
+ * reg_op: operation(s) to perform on that region of bitmap
+ *
+ * Can set, verify and/or release a region of bits in a bitmap,
+ * depending on which combination of REG_OP_* flag bits is set.
+ *
+ * A region of a bitmap is a sequence of bits in the bitmap, of
+ * some size '1 << order' (a power of two), aligned to that same
+ * '1 << order' power of two.
+ *
+ * Returns 1 if REG_OP_ISFREE succeeds (region is all zero bits).
+ * Returns 0 in all other cases and for all other reg_op values.
+ */
+
+enum {
+ REG_OP_ISFREE, /* true if region is all zero bits */
+ REG_OP_ALLOC, /* set all bits in region */
+ REG_OP_RELEASE, /* clear all bits in region */
+};
+
+static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
+{
+ int nbits_reg; /* number of bits in region */
+ int index; /* index first long of region in bitmap */
+ int offset; /* bit offset region in bitmap[index] */
+ int nlongs_reg; /* num longs spanned by region in bitmap */
+ int nbitsinlong; /* num bits of region in each spanned long */
+ unsigned long mask; /* bitmask for one long of region */
+ int i; /* scans bitmap by longs */
+ int ret = 0; /* return value */
+
+ /*
+ * Either nlongs_reg == 1 (for small orders that fit in one long)
+ * or (offset == 0 && mask == ~0UL) (for larger multiword orders.)
+ */
+ nbits_reg = 1 << order;
+ index = pos / BITS_PER_LONG;
+ offset = pos - (index * BITS_PER_LONG);
+ nlongs_reg = BITS_TO_LONGS(nbits_reg);
+ nbitsinlong = min(nbits_reg, BITS_PER_LONG);
+
+ /*
+ * Can't do "mask = (1UL << nbitsinlong) - 1", as that
+ * overflows if nbitsinlong == BITS_PER_LONG.
+ */
+ mask = (1UL << (nbitsinlong - 1));
+ mask += mask - 1;
+ mask <<= offset;
+
+ switch (reg_op) {
+ case REG_OP_ISFREE:
+ for (i = 0; i < nlongs_reg; i++) {
+ if (bitmap[index + i] & mask)
+ goto done;
+ }
+ ret = 1; /* all bits in region free (zero) */
+ break;
+
+ case REG_OP_ALLOC:
+ for (i = 0; i < nlongs_reg; i++)
+ bitmap[index + i] |= mask;
+ break;
+
+ case REG_OP_RELEASE:
+ for (i = 0; i < nlongs_reg; i++)
+ bitmap[index + i] &= ~mask;
+ break;
+ }
+done:
+ return ret;
+}
+
+/**
+ * bitmap_find_free_region - find a contiguous aligned mem region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @bits: number of bits in the bitmap
+ * @order: region size (log base 2 of number of bits) to find
+ *
+ * Find a region of free (zero) bits in a @bitmap of @bits bits and
+ * allocate them (set them to one). Only consider regions of length
+ * a power (@order) of two, aligned to that power of two, which
+ * makes the search algorithm much faster.
+ *
+ * Return the bit offset in bitmap of the allocated region,
+ * or -errno on failure.
+ */
+int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
+{
+ int pos; /* scans bitmap by regions of size order */
+
+ for (pos = 0; pos < bits; pos += (1 << order))
+ if (__reg_op(bitmap, pos, order, REG_OP_ISFREE))
+ break;
+ if (pos == bits)
+ return -ENOMEM;
+ __reg_op(bitmap, pos, order, REG_OP_ALLOC);
+ return pos;
+}
+EXPORT_SYMBOL(bitmap_find_free_region);
+
+/**
+ * bitmap_release_region - release allocated bitmap region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @pos: beginning of bit region to release
+ * @order: region size (log base 2 of number of bits) to release
+ *
+ * This is the complement to __bitmap_find_free_region and releases
+ * the found region (by clearing it in the bitmap).
+ *
+ * No return value.
+ */
+void bitmap_release_region(unsigned long *bitmap, int pos, int order)
+{
+ __reg_op(bitmap, pos, order, REG_OP_RELEASE);
+}
+EXPORT_SYMBOL(bitmap_release_region);
+
+/**
+ * bitmap_allocate_region - allocate bitmap region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @pos: beginning of bit region to allocate
+ * @order: region size (log base 2 of number of bits) to allocate
+ *
+ * Allocate (set bits in) a specified region of a bitmap.
+ *
+ * Return 0 on success, or -EBUSY if specified region wasn't
+ * free (not all bits were zero).
+ */
+int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
+{
+ if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
+ return -EBUSY;
+ __reg_op(bitmap, pos, order, REG_OP_ALLOC);
+ return 0;
+}
+EXPORT_SYMBOL(bitmap_allocate_region);
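+
+/*
+ * Illustrative usage (sketch only; the 64-bit map is hypothetical):
+ * allocate an aligned run of 4 bits (order 2) and release it later:
+ *
+ *	unsigned long map[BITS_TO_LONGS(64)] = { 0 };
+ *	int pos = bitmap_find_free_region(map, 64, 2);
+ *	if (pos >= 0)
+ *		bitmap_release_region(map, pos, 2);
+ */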
--- /dev/null
+/*
+ * linux/lib/cmdline.c
+ * Helper functions generally used for parsing kernel command line
+ * and module options.
+ *
+ * Code and copyrights come from init/main.c and arch/i386/kernel/setup.c.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ *
+ * GNU Indent formatting options for this file: -kr -i8 -npsl -pcs
+ *
+ */
+
+#include <lwk/kernel.h>
+#include <lwk/string.h>
+#include <lwk/linux_compat.h>
+
+
+/**
+ * get_option - Parse integer from an option string
+ * @str: option string
+ * @pint: (output) integer value parsed from @str
+ *
+ * Read an int from an option string; if available accept a subsequent
+ * comma as well.
+ *
+ * Return values:
+ * 0 : no int in string
+ * 1 : int found, no subsequent comma
+ * 2 : int found including a subsequent comma
+ */
+
+int get_option (char **str, int *pint)
+{
+ char *cur = *str;
+
+ if (!cur || !(*cur))
+ return 0;
+ *pint = simple_strtol (cur, str, 0);
+ if (cur == *str)
+ return 0;
+ if (**str == ',') {
+ (*str)++;
+ return 2;
+ }
+
+ return 1;
+}
+
+/**
+ * get_options - Parse a string into a list of integers
+ * @str: String to be parsed
+ * @nints: size of integer array
+ * @ints: integer array
+ *
+ * This function parses a string containing a comma-separated
+ * list of integers. The parse halts when the array is
+ * full, or when no more numbers can be retrieved from the
+ * string.
+ *
+ * The return value is a pointer to the character in the string
+ * which caused the parse to end (typically the %NUL terminator,
+ * if @str is completely parseable).
+ */
+
+char *get_options(const char *str, int nints, int *ints)
+{
+ int res, i = 1;
+
+ while (i < nints) {
+ res = get_option ((char **)&str, ints + i);
+ if (res == 0)
+ break;
+ i++;
+ if (res == 1)
+ break;
+ }
+ ints[0] = i - 1;
+ return (char *)str;
+}
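+
+/*
+ * Illustrative usage sketch (not part of the upstream source): this is how
+ * a "foo=1,2,3" style option string is usually pulled apart.  The array
+ * size and sample string are arbitrary.
+ */
+#if 0
+static void get_options_example(void)
+{
+ int ints[5]; /* ints[0] receives the count, ints[1..] the values */
+ char *rest;
+
+ rest = get_options("10,20,30", 5, ints);
+ /* now ints[0] == 3, ints[1..3] == 10, 20, 30, and *rest == '\0' */
+}
+#endif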
+
+/**
+ * memparse - parse a string with mem suffixes into a number
+ * @ptr: Where parse begins
+ * @retptr: (output) Pointer to next char after parse completes
+ *
+ * Parses a string into a number. The number stored at @ptr is
+ * potentially suffixed with %K (for kilobytes, or 1024 bytes),
+ * %M (for megabytes, or 1048576 bytes), or %G (for gigabytes, or
+ * 1073741824). If the number is suffixed with K, M, or G, then
+ * the return value is the number multiplied by one kilobyte, one
+ * megabyte, or one gigabyte, respectively.
+ */
+
+unsigned long long memparse (char *ptr, char **retptr)
+{
+ unsigned long long ret = simple_strtoull (ptr, retptr, 0);
+
+ switch (**retptr) {
+ case 'G':
+ case 'g':
+ ret <<= 10;
+ /* fall through */
+ case 'M':
+ case 'm':
+ ret <<= 10;
+ /* fall through */
+ case 'K':
+ case 'k':
+ ret <<= 10;
+ (*retptr)++;
+ default:
+ break;
+ }
+ return ret;
+}
+
+
+EXPORT_SYMBOL(memparse);
+EXPORT_SYMBOL(get_option);
+EXPORT_SYMBOL(get_options);
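+
+/*
+ * Illustrative usage sketch (not part of the upstream source): memparse()
+ * is normally applied to size arguments such as "mem=64M".  The sample
+ * strings are arbitrary.
+ */
+#if 0
+static void memparse_example(void)
+{
+ char *after;
+
+ unsigned long long a = memparse("64M", &after); /* 64 << 20 */
+ unsigned long long b = memparse("512k", &after); /* 512 << 10 */
+ unsigned long long c = memparse("4096", &after); /* no suffix: 4096 */
+}
+#endif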
--- /dev/null
+#include <lwk/kernel.h>
+#include <lwk/bitops.h>
+#include <lwk/cpumask.h>
+
+int __first_cpu(const cpumask_t *srcp)
+{
+ return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS));
+}
+
+int __next_cpu(int n, const cpumask_t *srcp)
+{
+ return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1));
+}
+
+/*
+ * Find the highest possible smp_cpu_id()
+ *
+ * Note: if we're prepared to assume that cpu_present_map never changes
+ * (reasonable) then this function should cache its return value.
+ */
+int highest_possible_cpu_id(void)
+{
+ unsigned int cpu;
+ unsigned highest = 0;
+
+ for_each_cpu_mask(cpu, cpu_present_map)
+ highest = cpu;
+ return highest;
+}
+
+int __any_online_cpu(const cpumask_t *mask)
+{
+ int cpu;
+
+ for_each_cpu_mask(cpu, *mask) {
+ if (cpu_online(cpu))
+ break;
+ }
+ return cpu;
+}
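+
+/*
+ * Illustrative sketch (not part of the upstream source): the helpers above
+ * back the first_cpu()/next_cpu()/for_each_cpu_mask() macros, so walking a
+ * mask normally looks like the loop below.  cpu_online_map is assumed to
+ * exist, as in the Linux-derived cpumask interface.
+ */
+#if 0
+static int count_online_cpus_example(void)
+{
+ int cpu, count = 0;
+
+ for_each_cpu_mask(cpu, cpu_online_map)
+  count++;
+ return count;
+}
+#endif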
--- /dev/null
+/*
+ * linux/lib/ctype.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <lwk/ctype.h>
+#include <lwk/linux_compat.h>
+
+unsigned char _ctype[] = {
+_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
+_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
+_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
+_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
+_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
+_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
+_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
+_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
+_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
+_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
+_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
+_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
+_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
+_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
+
+EXPORT_SYMBOL(_ctype);
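+
+/*
+ * Illustrative note (not part of the upstream source): each table entry
+ * above is a bitmask of character-class flags, and the usual is*() macros
+ * in <lwk/ctype.h> simply test those bits (e.g. isdigit() tests _D,
+ * isalpha() tests _U|_L).  A minimal usage sketch, assuming those macros
+ * are provided by the header included above:
+ */
+#if 0
+static void ctype_example(void)
+{
+ int d = isdigit('7');  /* nonzero: '7' carries the _D flag */
+ int a = isalpha('7');  /* zero: '7' carries neither _U nor _L */
+ int s = isspace('\t'); /* nonzero: '\t' carries the _S flag */
+}
+#endif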
--- /dev/null
+/*
+ * Derived from arch/ppc/mm/extable.c and arch/i386/mm/extable.c.
+ *
+ * Copyright (C) 2004 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <lwk/init.h>
+#include <lwk/sort.h>
+#include <lwk/extable.h>
+
+#ifndef ARCH_HAS_SORT_EXTABLE
+/*
+ * The exception table needs to be sorted so that the binary
+ * search that we use to find entries in it works properly.
+ * This is used both for the kernel exception table and for
+ * the exception tables of modules that get loaded.
+ */
+static int cmp_ex(const void *a, const void *b)
+{
+ const struct exception_table_entry *x = a, *y = b;
+
+ /* avoid overflow */
+ if (x->insn > y->insn)
+ return 1;
+ if (x->insn < y->insn)
+ return -1;
+ return 0;
+}
+
+void sort_extable(struct exception_table_entry *start,
+ struct exception_table_entry *finish)
+{
+ sort(start, finish - start, sizeof(struct exception_table_entry),
+ cmp_ex, NULL);
+}
+#endif
+
+#ifndef ARCH_HAS_SEARCH_EXTABLE
+/*
+ * Search one exception table for an entry corresponding to the
+ * given instruction address, and return the address of the entry,
+ * or NULL if none is found.
+ * We use a binary search, and thus we assume that the table is
+ * already sorted.
+ */
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *first,
+ const struct exception_table_entry *last,
+ unsigned long value)
+{
+ while (first <= last) {
+ const struct exception_table_entry *mid;
+
+ mid = (last - first) / 2 + first;
+ /*
+ * careful, the distance between entries can be
+ * larger than 2GB:
+ */
+ if (mid->insn < value)
+ first = mid + 1;
+ else if (mid->insn > value)
+ last = mid - 1;
+ else
+ return mid;
+ }
+ return NULL;
+}
+#endif
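+
+/*
+ * Illustrative sketch (not part of the upstream source): a fault handler
+ * typically looks up the faulting instruction pointer in the sorted table
+ * and, if an entry is found, resumes at its fixup address.  The section
+ * symbols __start___ex_table/__stop___ex_table and the ->fixup member are
+ * assumed to follow the usual Linux layout.
+ */
+#if 0
+static unsigned long fixup_exception_example(unsigned long faulting_ip)
+{
+ const struct exception_table_entry *e;
+
+ e = search_extable(__start___ex_table, __stop___ex_table - 1,
+                    faulting_ip);
+ return e ? e->fixup : 0; /* 0: no fixup found, the fault is fatal */
+}
+#endif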
--- /dev/null
+#include <lwk/linux_compat.h>
+#include <arch/types.h>
+
+/**
+ * hweightN - returns the Hamming weight of an N-bit word
+ * @x: the word to weigh
+ *
+ * The Hamming Weight of a number is the total number of bits set in it.
+ */
+
+unsigned int hweight32(unsigned int w)
+{
+ unsigned int res = w - ((w >> 1) & 0x55555555);
+ res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+ res = (res + (res >> 4)) & 0x0F0F0F0F;
+ res = res + (res >> 8);
+ return (res + (res >> 16)) & 0x000000FF;
+}
+EXPORT_SYMBOL(hweight32);
+
+unsigned int hweight16(unsigned int w)
+{
+ unsigned int res = w - ((w >> 1) & 0x5555);
+ res = (res & 0x3333) + ((res >> 2) & 0x3333);
+ res = (res + (res >> 4)) & 0x0F0F;
+ return (res + (res >> 8)) & 0x00FF;
+}
+EXPORT_SYMBOL(hweight16);
+
+unsigned int hweight8(unsigned int w)
+{
+ unsigned int res = w - ((w >> 1) & 0x55);
+ res = (res & 0x33) + ((res >> 2) & 0x33);
+ return (res + (res >> 4)) & 0x0F;
+}
+EXPORT_SYMBOL(hweight8);
+
+unsigned long hweight64(__u64 w)
+{
+#if BITS_PER_LONG == 32
+ return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
+#elif BITS_PER_LONG == 64
+ __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
+ res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+ res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
+ res = res + (res >> 8);
+ res = res + (res >> 16);
+ return (res + (res >> 32)) & 0x00000000000000FFul;
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+EXPORT_SYMBOL(hweight64);
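+
+/*
+ * Illustrative sketch (not part of the upstream source): a few worked
+ * results of the parallel bit-count ("SWAR") routines above.
+ */
+#if 0
+static void hweight_example(void)
+{
+ unsigned int a = hweight32(0xF0F0F0F0); /* == 16 */
+ unsigned int b = hweight32(0x00000001); /* ==  1 */
+ unsigned int c = hweight8(0xFF);        /* ==  8 */
+}
+#endif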
--- /dev/null
+#define DEBG(x)
+#define DEBG1(x)
+/* inflate.c -- Not copyrighted 1992 by Mark Adler
+ version c10p1, 10 January 1993 */
+
+/*
+ * Adapted for booting Linux by Hannu Savolainen 1993
+ * based on gzip-1.0.3
+ *
+ * Nicolas Pitre <nico@cam.org>, 1999/04/14 :
+ * Little mods for all variable to reside either into rodata or bss segments
+ * by marking constant variables with 'const' and initializing all the others
+ * at run-time only. This allows for the kernel uncompressor to run
+ * directly from Flash or ROM memory on embedded systems.
+ */
+
+/*
+ Inflate deflated (PKZIP's method 8 compressed) data. The compression
+ method searches for as much of the current string of bytes (up to a
+ length of 258) in the previous 32 K bytes. If it doesn't find any
+ matches (of at least length 3), it codes the next byte. Otherwise, it
+ codes the length of the matched string and its distance backwards from
+ the current position. There is a single Huffman code that codes both
+ single bytes (called "literals") and match lengths. A second Huffman
+ code codes the distance information, which follows a length code. Each
+ length or distance code actually represents a base value and a number
+ of "extra" (sometimes zero) bits to get to add to the base value. At
+ the end of each deflated block is a special end-of-block (EOB) literal/
+ length code. The decoding process is basically: get a literal/length
+ code; if EOB then done; if a literal, emit the decoded byte; if a
+ length then get the distance and emit the referred-to bytes from the
+ sliding window of previously emitted data.
+
+ There are (currently) three kinds of inflate blocks: stored, fixed, and
+ dynamic. The compressor deals with some chunk of data at a time, and
+ decides which method to use on a chunk-by-chunk basis. A chunk might
+ typically be 32 K or 64 K. If the chunk is incompressible, then the
+ "stored" method is used. In this case, the bytes are simply stored as
+ is, eight bits per byte, with none of the above coding. The bytes are
+ preceded by a count, since there is no longer an EOB code.
+
+ If the data is compressible, then either the fixed or dynamic methods
+ are used. In the dynamic method, the compressed data is preceded by
+ an encoding of the literal/length and distance Huffman codes that are
+ to be used to decode this block. The representation is itself Huffman
+ coded, and so is preceded by a description of that code. These code
+ descriptions take up a little space, and so for small blocks, there is
+ a predefined set of codes, called the fixed codes. The fixed method is
+ used if the block codes up smaller that way (usually for quite small
+ chunks), otherwise the dynamic method is used. In the latter case, the
+ codes are customized to the probabilities in the current block, and so
+ can code it much better than the pre-determined fixed codes.
+
+ The Huffman codes themselves are decoded using a multi-level table
+ lookup, in order to maximize the speed of decoding plus the speed of
+ building the decoding tables. See the comments below that precede the
+ lbits and dbits tuning parameters.
+ */
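+
+/*
+ Decoding outline (illustrative only, not part of the original comment;
+ the actual loop is inflate_codes() further below):
+
+   for (;;) {
+     code = next literal/length code;
+     if (code == EOB)
+       break;
+     if (code is a literal)
+       emit the decoded byte;
+     else {
+       len  = match length decoded from code;
+       dist = next distance code;
+       copy len bytes from dist bytes back in the output window;
+     }
+   }
+ */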
+
+
+/*
+ Notes beyond the 1.93a appnote.txt:
+
+ 1. Distance pointers never point before the beginning of the output
+ stream.
+ 2. Distance pointers can point back across blocks, up to 32k away.
+ 3. There is an implied maximum of 7 bits for the bit length table and
+ 15 bits for the actual data.
+ 4. If only one code exists, then it is encoded using one bit. (Zero
+ would be more efficient, but perhaps a little confusing.) If two
+ codes exist, they are coded using one bit each (0 and 1).
+ 5. There is no way of sending zero distance codes--a dummy must be
+ sent if there are none. (History: a pre 2.0 version of PKZIP would
+ store blocks with no distance codes, but this was discovered to be
+ too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
+ zero distance codes, which is sent as one code of zero bits in
+ length.
+ 6. There are up to 286 literal/length codes. Code 256 represents the
+ end-of-block. Note however that the static length tree defines
+ 288 codes just to fill out the Huffman codes. Codes 286 and 287
+ cannot be used though, since there is no length base or extra bits
+ defined for them. Similarly, there are up to 30 distance codes.
+ However, static trees define 32 codes (all 5 bits) to fill out the
+ Huffman codes, but the last two had better not show up in the data.
+ 7. Unzip can check dynamic Huffman blocks for complete code sets.
+ The exception is that a single code would not be complete (see #4).
+ 8. The five bits following the block type is really the number of
+ literal codes sent minus 257.
+ 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
+ (1+6+6). Therefore, to output three times the length, you output
+ three codes (1+1+1), whereas to output four times the same length,
+ you only need two codes (1+3). Hmm.
+ 10. In the tree reconstruction algorithm, Code = Code + Increment
+ only if BitLength(i) is not zero. (Pretty obvious.)
+ 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
+ 12. Note: length code 284 can represent 227-258, but length code 285
+ really is 258. The last length deserves its own, short code
+ since it gets used a lot in very redundant files. The length
+ 258 is special since 258 - 3 (the min match length) is 255.
+ 13. The literal/length and distance code bit lengths are read as a
+ single stream of lengths. It is possible (and advantageous) for
+ a repeat code (16, 17, or 18) to go across the boundary between
+ the two sets of lengths.
+ */
+#include <lwk/compiler.h>
+
+#ifdef RCSID
+static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #";
+#endif
+
+#ifndef STATIC
+
+#if defined(STDC_HEADERS) || defined(HAVE_STDLIB_H)
+# include <sys/types.h>
+# include <stdlib.h>
+#endif
+
+#include "gzip.h"
+#define STATIC
+#endif /* !STATIC */
+
+#ifndef INIT
+#define INIT
+#endif
+
+#define slide window
+
+/* Huffman code lookup table entry--this entry is four bytes for machines
+ that have 16-bit pointers (e.g. PC's in the small or medium model).
+ Valid extra bits are 0..13. e == 15 is EOB (end of block), e == 16
+ means that v is a literal, 16 < e < 32 means that v is a pointer to
+ the next table, which codes e - 16 bits, and lastly e == 99 indicates
+ an unused code. If a code with e == 99 is looked up, this implies an
+ error in the data. */
+struct huft {
+ uch e; /* number of extra bits or operation */
+ uch b; /* number of bits in this code or subcode */
+ union {
+ ush n; /* literal, length base, or distance base */
+ struct huft *t; /* pointer to next level of table */
+ } v;
+};
+
+
+/* Function prototypes */
+STATIC int INIT huft_build OF((unsigned *, unsigned, unsigned,
+ const ush *, const ush *, struct huft **, int *));
+STATIC int INIT huft_free OF((struct huft *));
+STATIC int INIT inflate_codes OF((struct huft *, struct huft *, int, int));
+STATIC int INIT inflate_stored OF((void));
+STATIC int INIT inflate_fixed OF((void));
+STATIC int INIT inflate_dynamic OF((void));
+STATIC int INIT inflate_block OF((int *));
+STATIC int INIT inflate OF((void));
+
+
+/* The inflate algorithm uses a sliding 32 K byte window on the uncompressed
+ stream to find repeated byte strings. This is implemented here as a
+ circular buffer. The index is updated simply by incrementing and then
+ ANDing with 0x7fff (32K-1). */
+/* It is left to other modules to supply the 32 K area. It is assumed
+ to be usable as if it were declared "uch slide[32768];" or as just
+ "uch *slide;" and then malloc'ed in the latter case. The definition
+ must be in unzip.h, included above. */
+/* unsigned wp; current position in slide */
+#define wp outcnt
+#define flush_output(w) (wp=(w),flush_window())
+
+/* Tables for deflate from PKZIP's appnote.txt. */
+static const unsigned border[] = { /* Order of the bit length code lengths */
+ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+static const ush cplens[] = { /* Copy lengths for literal codes 257..285 */
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
+ 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
+ /* note: see note #13 above about the 258 in this list. */
+static const ush cplext[] = { /* Extra bits for literal codes 257..285 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */
+static const ush cpdist[] = { /* Copy offsets for distance codes 0..29 */
+ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
+ 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
+ 8193, 12289, 16385, 24577};
+static const ush cpdext[] = { /* Extra bits for distance codes */
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+ 7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
+ 12, 12, 13, 13};
+
+
+
+/* Macros for inflate() bit peeking and grabbing.
+ The usage is:
+
+ NEEDBITS(j)
+ x = b & mask_bits[j];
+ DUMPBITS(j)
+
+ where NEEDBITS makes sure that b has at least j bits in it, and
+ DUMPBITS removes the bits from b. The macros use the variable k
+ for the number of bits in b. Normally, b and k are register
+ variables for speed, and are initialized at the beginning of a
+ routine that uses these macros from a global bit buffer and count.
+
+ If we assume that EOB will be the longest code, then we will never
+ ask for bits with NEEDBITS that are beyond the end of the stream.
+ So, NEEDBITS should not read any more bytes than are needed to
+ meet the request. Then no bytes need to be "returned" to the buffer
+ at the end of the last block.
+
+ However, this assumption is not true for fixed blocks--the EOB code
+ is 7 bits, but the other literal/length codes can be 8 or 9 bits.
+ (The EOB code is shorter than other codes because fixed blocks are
+ generally short. So, while a block always has an EOB, many other
+ literal/length codes have a significantly lower probability of
+ showing up at all.) However, by making the first table have a
+ lookup of seven bits, the EOB code will be found in that first
+ lookup, and so will not require that too many bits be pulled from
+ the stream.
+ */
+
+STATIC ulg bb; /* bit buffer */
+STATIC unsigned bk; /* bits in bit buffer */
+
+STATIC const ush mask_bits[] = {
+ 0x0000,
+ 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
+ 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
+};
+
+#define NEXTBYTE() ({ int v = get_byte(); if (v < 0) goto underrun; (uch)v; })
+#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}}
+#define DUMPBITS(n) {b>>=(n);k-=(n);}
+
+
+/*
+ Huffman code decoding is performed using a multi-level table lookup.
+ The fastest way to decode is to simply build a lookup table whose
+ size is determined by the longest code. However, the time it takes
+ to build this table can also be a factor if the data being decoded
+ is not very long. The most common codes are necessarily the
+ shortest codes, so those codes dominate the decoding time, and hence
+ the speed. The idea is you can have a shorter table that decodes the
+ shorter, more probable codes, and then point to subsidiary tables for
+ the longer codes. The time it costs to decode the longer codes is
+ then traded against the time it takes to make longer tables.
+
+ The results of this trade-off are in the variables lbits and dbits
+ below. lbits is the number of bits the first level table for literal/
+ length codes can decode in one step, and dbits is the same thing for
+ the distance codes. Subsequent tables are also less than or equal to
+ those sizes. These values may be adjusted either when all of the
+ codes are shorter than that, in which case the longest code length in
+ bits is used, or when the shortest code is *longer* than the requested
+ table size, in which case the length of the shortest code in bits is
+ used.
+
+ There are two different values for the two tables, since they code a
+ different number of possibilities each. The literal/length table
+ codes 286 possible values, or in a flat code, a little over eight
+ bits. The distance table codes 30 possible values, or a little less
+ than five bits, flat. The optimum values for speed end up being
+ about one bit more than those, so lbits is 8+1 and dbits is 5+1.
+ The optimum values may differ though from machine to machine, and
+ possibly even between compilers. Your mileage may vary.
+ */
+
+
+STATIC const int lbits = 9; /* bits in base literal/length lookup table */
+STATIC const int dbits = 6; /* bits in base distance lookup table */
+
+
+/* If BMAX needs to be larger than 16, then h and x[] should be ulg. */
+#define BMAX 16 /* maximum bit length of any code (16 for explode) */
+#define N_MAX 288 /* maximum number of codes in any set */
+
+
+STATIC unsigned hufts; /* track memory usage */
+
+
+STATIC int INIT huft_build(
+ unsigned *b, /* code lengths in bits (all assumed <= BMAX) */
+ unsigned n, /* number of codes (assumed <= N_MAX) */
+ unsigned s, /* number of simple-valued codes (0..s-1) */
+ const ush *d, /* list of base values for non-simple codes */
+ const ush *e, /* list of extra bits for non-simple codes */
+ struct huft **t, /* result: starting table */
+ int *m /* maximum lookup bits, returns actual */
+ )
+/* Given a list of code lengths and a maximum table size, make a set of
+ tables to decode that set of codes. Return zero on success, one if
+ the given code set is incomplete (the tables are still built in this
+ case), two if the input is invalid (all zero length codes or an
+ oversubscribed set of lengths), and three if not enough memory. */
+{
+ unsigned a; /* counter for codes of length k */
+ unsigned f; /* i repeats in table every f entries */
+ int g; /* maximum code length */
+ int h; /* table level */
+ register unsigned i; /* counter, current code */
+ register unsigned j; /* counter */
+ register int k; /* number of bits in current code */
+ int l; /* bits per table (returned in m) */
+ register unsigned *p; /* pointer into c[], b[], or v[] */
+ register struct huft *q; /* points to current table */
+ struct huft r; /* table entry for structure assignment */
+ register int w; /* bits before this table == (l * h) */
+ unsigned *xp; /* pointer into x */
+ int y; /* number of dummy codes added */
+ unsigned z; /* number of entries in current table */
+ struct {
+ unsigned c[BMAX+1]; /* bit length count table */
+ struct huft *u[BMAX]; /* table stack */
+ unsigned v[N_MAX]; /* values in order of bit length */
+ unsigned x[BMAX+1]; /* bit offsets, then code stack */
+ } *stk;
+ unsigned *c, *v, *x;
+ struct huft **u;
+ int ret;
+
+DEBG("huft1 ");
+
+ stk = malloc(sizeof(*stk));
+ if (stk == NULL)
+ return 3; /* out of memory */
+
+ c = stk->c;
+ v = stk->v;
+ x = stk->x;
+ u = stk->u;
+
+ /* Generate counts for each bit length */
+ memzero(stk->c, sizeof(stk->c));
+ p = b; i = n;
+ do {
+ Tracecv(*p, (stderr, (n-i >= ' ' && n-i <= '~' ? "%c %d\n" : "0x%x %d\n"),
+ n-i, *p));
+ c[*p]++; /* assume all entries <= BMAX */
+ p++; /* Can't combine with above line (Solaris bug) */
+ } while (--i);
+ if (c[0] == n) /* null input--all zero length codes */
+ {
+ *t = (struct huft *)NULL;
+ *m = 0;
+ ret = 2;
+ goto out;
+ }
+
+DEBG("huft2 ");
+
+ /* Find minimum and maximum length, bound *m by those */
+ l = *m;
+ for (j = 1; j <= BMAX; j++)
+ if (c[j])
+ break;
+ k = j; /* minimum code length */
+ if ((unsigned)l < j)
+ l = j;
+ for (i = BMAX; i; i--)
+ if (c[i])
+ break;
+ g = i; /* maximum code length */
+ if ((unsigned)l > i)
+ l = i;
+ *m = l;
+
+DEBG("huft3 ");
+
+ /* Adjust last length count to fill out codes, if needed */
+ for (y = 1 << j; j < i; j++, y <<= 1)
+ if ((y -= c[j]) < 0) {
+ ret = 2; /* bad input: more codes than bits */
+ goto out;
+ }
+ if ((y -= c[i]) < 0) {
+ ret = 2;
+ goto out;
+ }
+ c[i] += y;
+
+DEBG("huft4 ");
+
+ /* Generate starting offsets into the value table for each length */
+ x[1] = j = 0;
+ p = c + 1; xp = x + 2;
+ while (--i) { /* note that i == g from above */
+ *xp++ = (j += *p++);
+ }
+
+DEBG("huft5 ");
+
+ /* Make a table of values in order of bit lengths */
+ p = b; i = 0;
+ do {
+ if ((j = *p++) != 0)
+ v[x[j]++] = i;
+ } while (++i < n);
+ n = x[g]; /* set n to length of v */
+
+DEBG("h6 ");
+
+ /* Generate the Huffman codes and for each, make the table entries */
+ x[0] = i = 0; /* first Huffman code is zero */
+ p = v; /* grab values in bit order */
+ h = -1; /* no tables yet--level -1 */
+ w = -l; /* bits decoded == (l * h) */
+ u[0] = (struct huft *)NULL; /* just to keep compilers happy */
+ q = (struct huft *)NULL; /* ditto */
+ z = 0; /* ditto */
+DEBG("h6a ");
+
+ /* go through the bit lengths (k already is bits in shortest code) */
+ for (; k <= g; k++)
+ {
+DEBG("h6b ");
+ a = c[k];
+ while (a--)
+ {
+DEBG("h6b1 ");
+ /* here i is the Huffman code of length k bits for value *p */
+ /* make tables up to required level */
+ while (k > w + l)
+ {
+DEBG1("1 ");
+ h++;
+ w += l; /* previous table always l bits */
+
+ /* compute minimum size table less than or equal to l bits */
+ z = (z = g - w) > (unsigned)l ? l : z; /* upper limit on table size */
+ if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
+ { /* too few codes for k-w bit table */
+DEBG1("2 ");
+ f -= a + 1; /* deduct codes from patterns left */
+ xp = c + k;
+ if (j < z)
+ while (++j < z) /* try smaller tables up to z bits */
+ {
+ if ((f <<= 1) <= *++xp)
+ break; /* enough codes to use up j bits */
+ f -= *xp; /* else deduct codes from patterns */
+ }
+ }
+DEBG1("3 ");
+ z = 1 << j; /* table entries for j-bit table */
+
+ /* allocate and link in new table */
+ if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) ==
+ (struct huft *)NULL)
+ {
+ if (h)
+ huft_free(u[0]);
+ ret = 3; /* not enough memory */
+ goto out;
+ }
+DEBG1("4 ");
+ hufts += z + 1; /* track memory usage */
+ *t = q + 1; /* link to list for huft_free() */
+ *(t = &(q->v.t)) = (struct huft *)NULL;
+ u[h] = ++q; /* table starts after link */
+
+DEBG1("5 ");
+ /* connect to last table, if there is one */
+ if (h)
+ {
+ x[h] = i; /* save pattern for backing up */
+ r.b = (uch)l; /* bits to dump before this table */
+ r.e = (uch)(16 + j); /* bits in this table */
+ r.v.t = q; /* pointer to this table */
+ j = i >> (w - l); /* (get around Turbo C bug) */
+ u[h-1][j] = r; /* connect to last table */
+ }
+DEBG1("6 ");
+ }
+DEBG("h6c ");
+
+ /* set up table entry in r */
+ r.b = (uch)(k - w);
+ if (p >= v + n)
+ r.e = 99; /* out of values--invalid code */
+ else if (*p < s)
+ {
+ r.e = (uch)(*p < 256 ? 16 : 15); /* 256 is end-of-block code */
+ r.v.n = (ush)(*p); /* simple code is just the value */
+ p++; /* one compiler does not like *p++ */
+ }
+ else
+ {
+ r.e = (uch)e[*p - s]; /* non-simple--look up in lists */
+ r.v.n = d[*p++ - s];
+ }
+DEBG("h6d ");
+
+ /* fill code-like entries with r */
+ f = 1 << (k - w);
+ for (j = i >> w; j < z; j += f)
+ q[j] = r;
+
+ /* backwards increment the k-bit code i */
+ for (j = 1 << (k - 1); i & j; j >>= 1)
+ i ^= j;
+ i ^= j;
+
+ /* backup over finished tables */
+ while ((i & ((1 << w) - 1)) != x[h])
+ {
+ h--; /* don't need to update q */
+ w -= l;
+ }
+DEBG("h6e ");
+ }
+DEBG("h6f ");
+ }
+
+DEBG("huft7 ");
+
+ /* Return true (1) if we were given an incomplete table */
+ ret = y != 0 && g != 1;
+
+ out:
+ free(stk);
+ return ret;
+}
+
+
+
+STATIC int INIT huft_free(
+ struct huft *t /* table to free */
+ )
+/* Free the malloc'ed tables built by huft_build(), which makes a linked
+ list of the tables it made, with the links in a dummy first entry of
+ each table. */
+{
+ register struct huft *p, *q;
+
+
+ /* Go through linked list, freeing from the malloced (t[-1]) address. */
+ p = t;
+ while (p != (struct huft *)NULL)
+ {
+ q = (--p)->v.t;
+ free((char*)p);
+ p = q;
+ }
+ return 0;
+}
+
+
+STATIC int INIT inflate_codes(
+ struct huft *tl, /* literal/length decoder tables */
+ struct huft *td, /* distance decoder tables */
+ int bl, /* number of bits decoded by tl[] */
+ int bd /* number of bits decoded by td[] */
+ )
+/* inflate (decompress) the codes in a deflated (compressed) block.
+ Return an error code or zero if it all goes ok. */
+{
+ register unsigned e; /* table entry flag/number of extra bits */
+ unsigned n, d; /* length and index for copy */
+ unsigned w; /* current window position */
+ struct huft *t; /* pointer to table entry */
+ unsigned ml, md; /* masks for bl and bd bits */
+ register ulg b; /* bit buffer */
+ register unsigned k; /* number of bits in bit buffer */
+
+
+ /* make local copies of globals */
+ b = bb; /* initialize bit buffer */
+ k = bk;
+ w = wp; /* initialize window position */
+
+ /* inflate the coded data */
+ ml = mask_bits[bl]; /* precompute masks for speed */
+ md = mask_bits[bd];
+ for (;;) /* do until end of block */
+ {
+ NEEDBITS((unsigned)bl)
+ if ((e = (t = tl + ((unsigned)b & ml))->e) > 16)
+ do {
+ if (e == 99)
+ return 1;
+ DUMPBITS(t->b)
+ e -= 16;
+ NEEDBITS(e)
+ } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16);
+ DUMPBITS(t->b)
+ if (e == 16) /* then it's a literal */
+ {
+ slide[w++] = (uch)t->v.n;
+ Tracevv((stderr, "%c", slide[w-1]));
+ if (w == WSIZE)
+ {
+ flush_output(w);
+ w = 0;
+ }
+ }
+ else /* it's an EOB or a length */
+ {
+ /* exit if end of block */
+ if (e == 15)
+ break;
+
+ /* get length of block to copy */
+ NEEDBITS(e)
+ n = t->v.n + ((unsigned)b & mask_bits[e]);
+ DUMPBITS(e);
+
+ /* decode distance of block to copy */
+ NEEDBITS((unsigned)bd)
+ if ((e = (t = td + ((unsigned)b & md))->e) > 16)
+ do {
+ if (e == 99)
+ return 1;
+ DUMPBITS(t->b)
+ e -= 16;
+ NEEDBITS(e)
+ } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16);
+ DUMPBITS(t->b)
+ NEEDBITS(e)
+ d = w - t->v.n - ((unsigned)b & mask_bits[e]);
+ DUMPBITS(e)
+ Tracevv((stderr,"\\[%d,%d]", w-d, n));
+
+ /* do the copy */
+ do {
+ n -= (e = (e = WSIZE - ((d &= WSIZE-1) > w ? d : w)) > n ? n : e);
+#if !defined(NOMEMCPY) && !defined(DEBUG)
+ if (w - d >= e) /* (this test assumes unsigned comparison) */
+ {
+ memcpy(slide + w, slide + d, e);
+ w += e;
+ d += e;
+ }
+ else /* do it slow to avoid memcpy() overlap */
+#endif /* !NOMEMCPY */
+ do {
+ slide[w++] = slide[d++];
+ Tracevv((stderr, "%c", slide[w-1]));
+ } while (--e);
+ if (w == WSIZE)
+ {
+ flush_output(w);
+ w = 0;
+ }
+ } while (n);
+ }
+ }
+
+
+ /* restore the globals from the locals */
+ wp = w; /* restore global window pointer */
+ bb = b; /* restore global bit buffer */
+ bk = k;
+
+ /* done */
+ return 0;
+
+ underrun:
+ return 4; /* Input underrun */
+}
+
+
+
+STATIC int INIT inflate_stored(void)
+/* "decompress" an inflated type 0 (stored) block. */
+{
+ unsigned n; /* number of bytes in block */
+ unsigned w; /* current window position */
+ register ulg b; /* bit buffer */
+ register unsigned k; /* number of bits in bit buffer */
+
+DEBG("<stor");
+
+ /* make local copies of globals */
+ b = bb; /* initialize bit buffer */
+ k = bk;
+ w = wp; /* initialize window position */
+
+
+ /* go to byte boundary */
+ n = k & 7;
+ DUMPBITS(n);
+
+
+ /* get the length and its complement */
+ NEEDBITS(16)
+ n = ((unsigned)b & 0xffff);
+ DUMPBITS(16)
+ NEEDBITS(16)
+ if (n != (unsigned)((~b) & 0xffff))
+ return 1; /* error in compressed data */
+ DUMPBITS(16)
+
+
+ /* read and output the compressed data */
+ while (n--)
+ {
+ NEEDBITS(8)
+ slide[w++] = (uch)b;
+ if (w == WSIZE)
+ {
+ flush_output(w);
+ w = 0;
+ }
+ DUMPBITS(8)
+ }
+
+
+ /* restore the globals from the locals */
+ wp = w; /* restore global window pointer */
+ bb = b; /* restore global bit buffer */
+ bk = k;
+
+ DEBG(">");
+ return 0;
+
+ underrun:
+ return 4; /* Input underrun */
+}
+
+
+/*
+ * We use `noinline' here to prevent gcc-3.5 from using too much stack space
+ */
+STATIC int noinline INIT inflate_fixed(void)
+/* decompress an inflated type 1 (fixed Huffman codes) block. We should
+ either replace this with a custom decoder, or at least precompute the
+ Huffman tables. */
+{
+ int i; /* temporary variable */
+ struct huft *tl; /* literal/length code table */
+ struct huft *td; /* distance code table */
+ int bl; /* lookup bits for tl */
+ int bd; /* lookup bits for td */
+ unsigned *l; /* length list for huft_build */
+
+DEBG("<fix");
+
+ l = malloc(sizeof(*l) * 288);
+ if (l == NULL)
+ return 3; /* out of memory */
+
+ /* set up literal table */
+ for (i = 0; i < 144; i++)
+ l[i] = 8;
+ for (; i < 256; i++)
+ l[i] = 9;
+ for (; i < 280; i++)
+ l[i] = 7;
+ for (; i < 288; i++) /* make a complete, but wrong code set */
+ l[i] = 8;
+ bl = 7;
+ if ((i = huft_build(l, 288, 257, cplens, cplext, &tl, &bl)) != 0) {
+ free(l);
+ return i;
+ }
+
+ /* set up distance table */
+ for (i = 0; i < 30; i++) /* make an incomplete code set */
+ l[i] = 5;
+ bd = 5;
+ if ((i = huft_build(l, 30, 0, cpdist, cpdext, &td, &bd)) > 1)
+ {
+ huft_free(tl);
+ free(l);
+
+ DEBG(">");
+ return i;
+ }
+
+
+ /* decompress until an end-of-block code */
+ if (inflate_codes(tl, td, bl, bd)) {
+ free(l);
+ return 1;
+ }
+
+ /* free the decoding tables, return */
+ free(l);
+ huft_free(tl);
+ huft_free(td);
+ return 0;
+}
+
+
+/*
+ * We use `noinline' here to prevent gcc-3.5 from using too much stack space
+ */
+STATIC int noinline INIT inflate_dynamic(void)
+/* decompress an inflated type 2 (dynamic Huffman codes) block. */
+{
+ int i; /* temporary variables */
+ unsigned j;
+ unsigned l; /* last length */
+ unsigned m; /* mask for bit lengths table */
+ unsigned n; /* number of lengths to get */
+ struct huft *tl; /* literal/length code table */
+ struct huft *td; /* distance code table */
+ int bl; /* lookup bits for tl */
+ int bd; /* lookup bits for td */
+ unsigned nb; /* number of bit length codes */
+ unsigned nl; /* number of literal/length codes */
+ unsigned nd; /* number of distance codes */
+ unsigned *ll; /* literal/length and distance code lengths */
+ register ulg b; /* bit buffer */
+ register unsigned k; /* number of bits in bit buffer */
+ int ret;
+
+DEBG("<dyn");
+
+#ifdef PKZIP_BUG_WORKAROUND
+ ll = malloc(sizeof(*ll) * (288+32)); /* literal/length and distance code lengths */
+#else
+ ll = malloc(sizeof(*ll) * (286+30)); /* literal/length and distance code lengths */
+#endif
+
+ /* make local bit buffer */
+ b = bb;
+ k = bk;
+
+
+ /* read in table lengths */
+ NEEDBITS(5)
+ nl = 257 + ((unsigned)b & 0x1f); /* number of literal/length codes */
+ DUMPBITS(5)
+ NEEDBITS(5)
+ nd = 1 + ((unsigned)b & 0x1f); /* number of distance codes */
+ DUMPBITS(5)
+ NEEDBITS(4)
+ nb = 4 + ((unsigned)b & 0xf); /* number of bit length codes */
+ DUMPBITS(4)
+#ifdef PKZIP_BUG_WORKAROUND
+ if (nl > 288 || nd > 32)
+#else
+ if (nl > 286 || nd > 30)
+#endif
+ {
+ ret = 1; /* bad lengths */
+ goto out;
+ }
+
+DEBG("dyn1 ");
+
+ /* read in bit-length-code lengths */
+ for (j = 0; j < nb; j++)
+ {
+ NEEDBITS(3)
+ ll[border[j]] = (unsigned)b & 7;
+ DUMPBITS(3)
+ }
+ for (; j < 19; j++)
+ ll[border[j]] = 0;
+
+DEBG("dyn2 ");
+
+ /* build decoding table for trees--single level, 7 bit lookup */
+ bl = 7;
+ if ((i = huft_build(ll, 19, 19, NULL, NULL, &tl, &bl)) != 0)
+ {
+ if (i == 1)
+ huft_free(tl);
+ ret = i; /* incomplete code set */
+ goto out;
+ }
+
+DEBG("dyn3 ");
+
+ /* read in literal and distance code lengths */
+ n = nl + nd;
+ m = mask_bits[bl];
+ i = l = 0;
+ while ((unsigned)i < n)
+ {
+ NEEDBITS((unsigned)bl)
+ j = (td = tl + ((unsigned)b & m))->b;
+ DUMPBITS(j)
+ j = td->v.n;
+ if (j < 16) /* length of code in bits (0..15) */
+ ll[i++] = l = j; /* save last length in l */
+ else if (j == 16) /* repeat last length 3 to 6 times */
+ {
+ NEEDBITS(2)
+ j = 3 + ((unsigned)b & 3);
+ DUMPBITS(2)
+ if ((unsigned)i + j > n) {
+ ret = 1;
+ goto out;
+ }
+ while (j--)
+ ll[i++] = l;
+ }
+ else if (j == 17) /* 3 to 10 zero length codes */
+ {
+ NEEDBITS(3)
+ j = 3 + ((unsigned)b & 7);
+ DUMPBITS(3)
+ if ((unsigned)i + j > n) {
+ ret = 1;
+ goto out;
+ }
+ while (j--)
+ ll[i++] = 0;
+ l = 0;
+ }
+ else /* j == 18: 11 to 138 zero length codes */
+ {
+ NEEDBITS(7)
+ j = 11 + ((unsigned)b & 0x7f);
+ DUMPBITS(7)
+ if ((unsigned)i + j > n) {
+ ret = 1;
+ goto out;
+ }
+ while (j--)
+ ll[i++] = 0;
+ l = 0;
+ }
+ }
+
+DEBG("dyn4 ");
+
+ /* free decoding table for trees */
+ huft_free(tl);
+
+DEBG("dyn5 ");
+
+ /* restore the global bit buffer */
+ bb = b;
+ bk = k;
+
+DEBG("dyn5a ");
+
+ /* build the decoding tables for literal/length and distance codes */
+ bl = lbits;
+ if ((i = huft_build(ll, nl, 257, cplens, cplext, &tl, &bl)) != 0)
+ {
+DEBG("dyn5b ");
+ if (i == 1) {
+ error("incomplete literal tree");
+ huft_free(tl);
+ }
+ ret = i; /* incomplete code set */
+ goto out;
+ }
+DEBG("dyn5c ");
+ bd = dbits;
+ if ((i = huft_build(ll + nl, nd, 0, cpdist, cpdext, &td, &bd)) != 0)
+ {
+DEBG("dyn5d ");
+ if (i == 1) {
+ error("incomplete distance tree");
+#ifdef PKZIP_BUG_WORKAROUND
+ i = 0;
+ }
+#else
+ huft_free(td);
+ }
+ huft_free(tl);
+ ret = i; /* incomplete code set */
+ goto out;
+#endif
+ }
+
+DEBG("dyn6 ");
+
+ /* decompress until an end-of-block code */
+ if (inflate_codes(tl, td, bl, bd)) {
+ ret = 1;
+ goto out;
+ }
+
+DEBG("dyn7 ");
+
+ /* free the decoding tables, return */
+ huft_free(tl);
+ huft_free(td);
+
+ DEBG(">");
+ ret = 0;
+out:
+ free(ll);
+ return ret;
+
+underrun:
+ ret = 4; /* Input underrun */
+ goto out;
+}
+
+
+
+STATIC int INIT inflate_block(
+ int *e /* last block flag */
+ )
+/* decompress an inflated block */
+{
+ unsigned t; /* block type */
+ register ulg b; /* bit buffer */
+ register unsigned k; /* number of bits in bit buffer */
+
+ DEBG("<blk");
+
+ /* make local bit buffer */
+ b = bb;
+ k = bk;
+
+
+ /* read in last block bit */
+ NEEDBITS(1)
+ *e = (int)b & 1;
+ DUMPBITS(1)
+
+
+ /* read in block type */
+ NEEDBITS(2)
+ t = (unsigned)b & 3;
+ DUMPBITS(2)
+
+
+ /* restore the global bit buffer */
+ bb = b;
+ bk = k;
+
+ /* inflate that block type */
+ if (t == 2)
+ return inflate_dynamic();
+ if (t == 0)
+ return inflate_stored();
+ if (t == 1)
+ return inflate_fixed();
+
+ DEBG(">");
+
+ /* bad block type */
+ return 2;
+
+ underrun:
+ return 4; /* Input underrun */
+}
+
+
+
+STATIC int INIT inflate(void)
+/* decompress an inflated entry */
+{
+ int e; /* last block flag */
+ int r; /* result code */
+ unsigned h; /* maximum struct huft's malloc'ed */
+ void *ptr;
+
+ /* initialize window, bit buffer */
+ wp = 0;
+ bk = 0;
+ bb = 0;
+
+
+ /* decompress until the last block */
+ h = 0;
+ do {
+ hufts = 0;
+ gzip_mark(&ptr);
+ if ((r = inflate_block(&e)) != 0) {
+ gzip_release(&ptr);
+ return r;
+ }
+ gzip_release(&ptr);
+ if (hufts > h)
+ h = hufts;
+ } while (!e);
+
+ /* Undo too much lookahead. The next read will be byte aligned so we
+ * can discard unused bits in the last meaningful byte.
+ */
+ while (bk >= 8) {
+ bk -= 8;
+ inptr--;
+ }
+
+ /* flush out slide */
+ flush_output(wp);
+
+
+ /* return success */
+#ifdef DEBUG
+ fprintf(stderr, "<%u> ", h);
+#endif /* DEBUG */
+ return 0;
+}
+
+/**********************************************************************
+ *
+ * The following are support routines for inflate.c
+ *
+ **********************************************************************/
+
+static ulg crc_32_tab[256];
+static ulg crc; /* initialized in makecrc() so it'll reside in bss */
+#define CRC_VALUE (crc ^ 0xffffffffUL)
+
+/*
+ * Code to compute the CRC-32 table. Borrowed from
+ * gzip-1.0.3/makecrc.c.
+ */
+
+static void INIT
+makecrc(void)
+{
+/* Not copyrighted 1990 Mark Adler */
+
+ unsigned long c; /* crc shift register */
+ unsigned long e; /* polynomial exclusive-or pattern */
+ int i; /* counter for all possible eight bit values */
+ int k; /* byte being shifted into crc apparatus */
+
+ /* terms of polynomial defining this crc (except x^32): */
+ static const int p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26};
+
+ /* Make exclusive-or pattern from polynomial */
+ e = 0;
+ for (i = 0; i < sizeof(p)/sizeof(int); i++)
+ e |= 1L << (31 - p[i]);
+
+ crc_32_tab[0] = 0;
+
+ for (i = 1; i < 256; i++)
+ {
+ c = 0;
+ for (k = i | 256; k != 1; k >>= 1)
+ {
+ c = c & 1 ? (c >> 1) ^ e : c >> 1;
+ if (k & 1)
+ c ^= e;
+ }
+ crc_32_tab[i] = c;
+ }
+
+ /* this is initialized here so this code could reside in ROM */
+ crc = (ulg)0xffffffffUL; /* shift register contents */
+}
+
+/* gzip flag byte */
+#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */
+#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
+#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
+#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
+#define COMMENT 0x10 /* bit 4 set: file comment present */
+#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
+#define RESERVED 0xC0 /* bit 6,7: reserved */
+
+/*
+ * Do the uncompression!
+ */
+static int INIT gunzip(void)
+{
+ uch flags;
+ unsigned char magic[2]; /* magic header */
+ char method;
+ ulg orig_crc = 0; /* original crc */
+ ulg orig_len = 0; /* original uncompressed length */
+ int res;
+
+ magic[0] = NEXTBYTE();
+ magic[1] = NEXTBYTE();
+ method = NEXTBYTE();
+
+ if (magic[0] != 037 ||
+ ((magic[1] != 0213) && (magic[1] != 0236))) {
+ error("bad gzip magic numbers");
+ return -1;
+ }
+
+ /* We only support method #8, DEFLATED */
+ if (method != 8) {
+ error("internal error, invalid method");
+ return -1;
+ }
+
+ flags = (uch)get_byte();
+ if ((flags & ENCRYPTED) != 0) {
+ error("Input is encrypted");
+ return -1;
+ }
+ if ((flags & CONTINUATION) != 0) {
+ error("Multi part input");
+ return -1;
+ }
+ if ((flags & RESERVED) != 0) {
+ error("Input has invalid flags");
+ return -1;
+ }
+ NEXTBYTE(); /* Get timestamp */
+ NEXTBYTE();
+ NEXTBYTE();
+ NEXTBYTE();
+
+ (void)NEXTBYTE(); /* Ignore extra flags for the moment */
+ (void)NEXTBYTE(); /* Ignore OS type for the moment */
+
+ if ((flags & EXTRA_FIELD) != 0) {
+ unsigned len = (unsigned)NEXTBYTE();
+ len |= ((unsigned)NEXTBYTE())<<8;
+ while (len--) (void)NEXTBYTE();
+ }
+
+ /* Get original file name if it was truncated */
+ if ((flags & ORIG_NAME) != 0) {
+ /* Discard the old name */
+ while (NEXTBYTE() != 0) /* null */ ;
+ }
+
+ /* Discard file comment if any */
+ if ((flags & COMMENT) != 0) {
+ while (NEXTBYTE() != 0) /* null */ ;
+ }
+
+ /* Decompress */
+ if ((res = inflate())) {
+ switch (res) {
+ case 0:
+ break;
+ case 1:
+ error("invalid compressed format (err=1)");
+ break;
+ case 2:
+ error("invalid compressed format (err=2)");
+ break;
+ case 3:
+ error("out of memory");
+ break;
+ case 4:
+ error("out of input data");
+ break;
+ default:
+ error("invalid compressed format (other)");
+ }
+ return -1;
+ }
+
+ /* Get the crc and original length */
+ /* crc32 (see algorithm.doc)
+ * uncompressed input size modulo 2^32
+ */
+ orig_crc = (ulg) NEXTBYTE();
+ orig_crc |= (ulg) NEXTBYTE() << 8;
+ orig_crc |= (ulg) NEXTBYTE() << 16;
+ orig_crc |= (ulg) NEXTBYTE() << 24;
+
+ orig_len = (ulg) NEXTBYTE();
+ orig_len |= (ulg) NEXTBYTE() << 8;
+ orig_len |= (ulg) NEXTBYTE() << 16;
+ orig_len |= (ulg) NEXTBYTE() << 24;
+
+ /* Validate decompression */
+ if (orig_crc != CRC_VALUE) {
+ error("crc error");
+ return -1;
+ }
+ if (orig_len != bytes_out) {
+ error("length error");
+ return -1;
+ }
+ return 0;
+
+ underrun: /* NEXTBYTE() goto's here if needed */
+ error("out of input data");
+ return -1;
+}
+
+
--- /dev/null
+/*
+ * A fast, small, non-recursive O(nlog n) sort for the Linux kernel
+ *
+ * Jan 23 2005 Matt Mackall <mpm@selenic.com>
+ */
+
+#include <lwk/kernel.h>
+#include <lwk/sort.h>
+#include <lwk/linux_compat.h>
+
+static void u32_swap(void *a, void *b, int size)
+{
+ u32 t = *(u32 *)a;
+ *(u32 *)a = *(u32 *)b;
+ *(u32 *)b = t;
+}
+
+static void generic_swap(void *a, void *b, int size)
+{
+ char t;
+
+ do {
+ t = *(char *)a;
+ *(char *)a++ = *(char *)b;
+ *(char *)b++ = t;
+ } while (--size > 0);
+}
+
+/**
+ * sort - sort an array of elements
+ * @base: pointer to data to sort
+ * @num: number of elements
+ * @size: size of each element
+ * @cmp: pointer to comparison function
+ * @swap: pointer to swap function or NULL
+ *
+ * This function does a heapsort on the given array. You may provide a
+ * swap function optimized to your element type.
+ *
+ * Sorting time is O(n log n) both on average and worst-case. While
+ * qsort is about 20% faster on average, it suffers from exploitable
+ * O(n*n) worst-case behavior and extra memory requirements that make
+ * it less suitable for kernel use.
+ */
+
+void sort(void *base, size_t num, size_t size,
+ int (*cmp)(const void *, const void *),
+ void (*swap)(void *, void *, int size))
+{
+ /* pre-scale counters for performance */
+ int i = (num/2 - 1) * size, n = num * size, c, r;
+
+ if (!swap)
+ swap = (size == 4 ? u32_swap : generic_swap);
+
+ /* heapify */
+ for ( ; i >= 0; i -= size) {
+ for (r = i; r * 2 + size < n; r = c) {
+ c = r * 2 + size;
+ if (c < n - size && cmp(base + c, base + c + size) < 0)
+ c += size;
+ if (cmp(base + r, base + c) >= 0)
+ break;
+ swap(base + r, base + c, size);
+ }
+ }
+
+ /* sort */
+ for (i = n - size; i >= 0; i -= size) {
+ swap(base, base + i, size);
+ for (r = 0; r * 2 + size < i; r = c) {
+ c = r * 2 + size;
+ if (c < i - size && cmp(base + c, base + c + size) < 0)
+ c += size;
+ if (cmp(base + r, base + c) >= 0)
+ break;
+ swap(base + r, base + c, size);
+ }
+ }
+}
+
+EXPORT_SYMBOL(sort);
+
+#if 0
+/* a simple boot-time regression test */
+
+int cmpint(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+static int sort_test(void)
+{
+ int *a, i, r = 1;
+
+ a = kmalloc(1000 * sizeof(int), GFP_KERNEL);
+ BUG_ON(!a);
+
+ printk("testing sort()\n");
+
+ for (i = 0; i < 1000; i++) {
+ r = (r * 725861) % 6599;
+ a[i] = r;
+ }
+
+ sort(a, 1000, sizeof(int), cmpint, NULL);
+
+ for (i = 0; i < 999; i++)
+ if (a[i] > a[i+1]) {
+ printk("sort() failed!\n");
+ break;
+ }
+
+ kfree(a);
+
+ return 0;
+}
+
+module_init(sort_test);
+#endif
--- /dev/null
+/*
+ * linux/lib/string.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/*
+ * stupid library routines.. The optimized versions should generally be found
+ * as inline code in <asm-xx/string.h>
+ *
+ * These are buggy as well..
+ *
+ * * Fri Jun 25 1999, Ingo Oeser <ioe@informatik.tu-chemnitz.de>
+ * - Added strsep() which will replace strtok() soon (because strsep() is
+ * reentrant and should be faster). Use only strsep() in new code, please.
+ *
+ * * Sat Feb 09 2002, Jason Thomas <jason@topic.com.au>,
+ * Matthew Hawkins <matt@mh.dropbear.id.au>
+ * - Kissed strtok() goodbye
+ */
+
+#include <lwk/kernel.h>
+#include <lwk/types.h>
+#include <lwk/string.h>
+#include <lwk/ctype.h>
+#include <lwk/linux_compat.h>
+#include <arch/bug.h>
+
+#ifndef __HAVE_ARCH_STRNICMP
+/**
+ * strnicmp - Case insensitive, length-limited string comparison
+ * @s1: One string
+ * @s2: The other string
+ * @len: the maximum number of characters to compare
+ */
+int strnicmp(const char *s1, const char *s2, size_t len)
+{
+ /* Yes, Virginia, it had better be unsigned */
+ unsigned char c1, c2;
+
+ c1 = c2 = 0;
+ if (len) {
+ do {
+ c1 = *s1;
+ c2 = *s2;
+ s1++;
+ s2++;
+ if (!c1)
+ break;
+ if (!c2)
+ break;
+ if (c1 == c2)
+ continue;
+ c1 = tolower(c1);
+ c2 = tolower(c2);
+ if (c1 != c2)
+ break;
+ } while (--len);
+ }
+ return (int)c1 - (int)c2;
+}
+EXPORT_SYMBOL(strnicmp);
+#endif
+
+#ifndef __HAVE_ARCH_STRCPY
+/**
+ * strcpy - Copy a %NUL terminated string
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ */
+#undef strcpy
+char *strcpy(char *dest, const char *src)
+{
+ char *tmp = dest;
+
+ while ((*dest++ = *src++) != '\0')
+ /* nothing */;
+ return tmp;
+}
+EXPORT_SYMBOL(strcpy);
+#endif
+
+#ifndef __HAVE_ARCH_STRNCPY
+/**
+ * strncpy - Copy a length-limited, %NUL-terminated string
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @count: The maximum number of bytes to copy
+ *
+ * The result is not %NUL-terminated if the source exceeds
+ * @count bytes.
+ *
+ * If the length of @src is less than @count, the remainder
+ * of @dest will be padded with %NUL bytes.
+ *
+ */
+char *strncpy(char *dest, const char *src, size_t count)
+{
+ char *tmp = dest;
+
+ while (count) {
+ if ((*tmp = *src) != 0)
+ src++;
+ tmp++;
+ count--;
+ }
+ return dest;
+}
+EXPORT_SYMBOL(strncpy);
+#endif
+
+#ifndef __HAVE_ARCH_STRLCPY
+/**
+ * strlcpy - Copy a %NUL terminated string into a sized buffer
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @size: size of destination buffer
+ *
+ * Compatible with *BSD: the result is always a valid
+ * NUL-terminated string that fits in the buffer (unless,
+ * of course, the buffer size is zero). It does not pad
+ * out the result like strncpy() does.
+ */
+size_t strlcpy(char *dest, const char *src, size_t size)
+{
+ size_t ret = strlen(src);
+
+ if (size) {
+ size_t len = (ret >= size) ? size - 1 : ret;
+ memcpy(dest, src, len);
+ dest[len] = '\0';
+ }
+ return ret;
+}
+EXPORT_SYMBOL(strlcpy);
+#endif
+
+#ifndef __HAVE_ARCH_STRCAT
+/**
+ * strcat - Append one %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ */
+#undef strcat
+char *strcat(char *dest, const char *src)
+{
+ char *tmp = dest;
+
+ while (*dest)
+ dest++;
+ while ((*dest++ = *src++) != '\0')
+ ;
+ return tmp;
+}
+EXPORT_SYMBOL(strcat);
+#endif
+
+#ifndef __HAVE_ARCH_STRNCAT
+/**
+ * strncat - Append a length-limited, %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ * @count: The maximum numbers of bytes to copy
+ *
+ * Note that in contrast to strncpy, strncat ensures the result is
+ * terminated.
+ */
+char *strncat(char *dest, const char *src, size_t count)
+{
+ char *tmp = dest;
+
+ if (count) {
+ while (*dest)
+ dest++;
+ while ((*dest++ = *src++) != 0) {
+ if (--count == 0) {
+ *dest = '\0';
+ break;
+ }
+ }
+ }
+ return tmp;
+}
+EXPORT_SYMBOL(strncat);
+#endif
+
+#ifndef __HAVE_ARCH_STRLCAT
+/**
+ * strlcat - Append a length-limited, %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ * @count: The size of the destination buffer.
+ */
+size_t strlcat(char *dest, const char *src, size_t count)
+{
+ size_t dsize = strlen(dest);
+ size_t len = strlen(src);
+ size_t res = dsize + len;
+
+ /* This would be a bug */
+ BUG_ON(dsize >= count);
+
+ dest += dsize;
+ count -= dsize;
+ if (len >= count)
+ len = count-1;
+ memcpy(dest, src, len);
+ dest[len] = 0;
+ return res;
+}
+EXPORT_SYMBOL(strlcat);
+#endif
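+
+/*
+ * Illustrative sketch (not part of the upstream source): strlcpy() and
+ * strlcat() return the length the complete string would have had, so a
+ * return value >= the buffer size signals truncation.  The buffer size
+ * and strings below are arbitrary.
+ */
+#if 0
+static void strlcat_example(void)
+{
+ char buf[8];
+
+ strlcpy(buf, "foo", sizeof(buf));
+ if (strlcat(buf, "barbaz", sizeof(buf)) >= sizeof(buf)) {
+  /* "foobarbaz" did not fit; buf now holds the truncated "foobarb" */
+ }
+}
+#endif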
+
+#ifndef __HAVE_ARCH_STRCMP
+/**
+ * strcmp - Compare two strings
+ * @cs: One string
+ * @ct: Another string
+ */
+#undef strcmp
+int strcmp(const char *cs, const char *ct)
+{
+ signed char __res;
+
+ while (1) {
+ if ((__res = *cs - *ct++) != 0 || !*cs++)
+ break;
+ }
+ return __res;
+}
+EXPORT_SYMBOL(strcmp);
+#endif
+
+#ifndef __HAVE_ARCH_STRNCMP
+/**
+ * strncmp - Compare two length-limited strings
+ * @cs: One string
+ * @ct: Another string
+ * @count: The maximum number of bytes to compare
+ */
+int strncmp(const char *cs, const char *ct, size_t count)
+{
+ signed char __res = 0;
+
+ while (count) {
+ if ((__res = *cs - *ct++) != 0 || !*cs++)
+ break;
+ count--;
+ }
+ return __res;
+}
+EXPORT_SYMBOL(strncmp);
+#endif
+
+#ifndef __HAVE_ARCH_STRCHR
+/**
+ * strchr - Find the first occurrence of a character in a string
+ * @s: The string to be searched
+ * @c: The character to search for
+ */
+char *strchr(const char *s, int c)
+{
+ for (; *s != (char)c; ++s)
+ if (*s == '\0')
+ return NULL;
+ return (char *)s;
+}
+EXPORT_SYMBOL(strchr);
+#endif
+
+#ifndef __HAVE_ARCH_STRRCHR
+/**
+ * strrchr - Find the last occurrence of a character in a string
+ * @s: The string to be searched
+ * @c: The character to search for
+ */
+char *strrchr(const char *s, int c)
+{
+ const char *p = s + strlen(s);
+ do {
+ if (*p == (char)c)
+ return (char *)p;
+ } while (--p >= s);
+ return NULL;
+}
+EXPORT_SYMBOL(strrchr);
+#endif
+
+#ifndef __HAVE_ARCH_STRNCHR
+/**
+ * strnchr - Find a character in a length limited string
+ * @s: The string to be searched
+ * @count: The number of characters to be searched
+ * @c: The character to search for
+ */
+char *strnchr(const char *s, size_t count, int c)
+{
+ for (; count-- && *s != '\0'; ++s)
+ if (*s == (char)c)
+ return (char *)s;
+ return NULL;
+}
+EXPORT_SYMBOL(strnchr);
+#endif
+
+#ifndef __HAVE_ARCH_STRLEN
+/**
+ * strlen - Find the length of a string
+ * @s: The string to be sized
+ */
+size_t strlen(const char *s)
+{
+ const char *sc;
+
+ for (sc = s; *sc != '\0'; ++sc)
+ /* nothing */;
+ return sc - s;
+}
+EXPORT_SYMBOL(strlen);
+#endif
+
+#ifndef __HAVE_ARCH_STRNLEN
+/**
+ * strnlen - Find the length of a length-limited string
+ * @s: The string to be sized
+ * @count: The maximum number of bytes to search
+ */
+size_t strnlen(const char *s, size_t count)
+{
+ const char *sc;
+
+ for (sc = s; count-- && *sc != '\0'; ++sc)
+ /* nothing */;
+ return sc - s;
+}
+EXPORT_SYMBOL(strnlen);
+#endif
+
+#ifndef __HAVE_ARCH_STRSPN
+/**
+ * strspn - Calculate the length of the initial substring of @s which
+ * consists entirely of characters in @accept
+ * @s: The string to be searched
+ * @accept: The string to search for
+ */
+size_t strspn(const char *s, const char *accept)
+{
+ const char *p;
+ const char *a;
+ size_t count = 0;
+
+ for (p = s; *p != '\0'; ++p) {
+ for (a = accept; *a != '\0'; ++a) {
+ if (*p == *a)
+ break;
+ }
+ if (*a == '\0')
+ return count;
+ ++count;
+ }
+ return count;
+}
+
+EXPORT_SYMBOL(strspn);
+#endif
+
+#ifndef __HAVE_ARCH_STRCSPN
+/**
+ * strcspn - Calculate the length of the initial substring of @s which
+ * contains no characters from @reject
+ * @s: The string to be searched
+ * @reject: The string to avoid
+ */
+size_t strcspn(const char *s, const char *reject)
+{
+ const char *p;
+ const char *r;
+ size_t count = 0;
+
+ for (p = s; *p != '\0'; ++p) {
+ for (r = reject; *r != '\0'; ++r) {
+ if (*p == *r)
+ return count;
+ }
+ ++count;
+ }
+ return count;
+}
+EXPORT_SYMBOL(strcspn);
+#endif
+
+#ifndef __HAVE_ARCH_STRPBRK
+/**
+ * strpbrk - Find the first occurrence of a set of characters
+ * @cs: The string to be searched
+ * @ct: The characters to search for
+ */
+char *strpbrk(const char *cs, const char *ct)
+{
+ const char *sc1, *sc2;
+
+ for (sc1 = cs; *sc1 != '\0'; ++sc1) {
+ for (sc2 = ct; *sc2 != '\0'; ++sc2) {
+ if (*sc1 == *sc2)
+ return (char *)sc1;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(strpbrk);
+#endif
+
+#ifndef __HAVE_ARCH_STRSEP
+/**
+ * strsep - Split a string into tokens
+ * @s: The string to be searched
+ * @ct: The characters to search for
+ *
+ * strsep() updates @s to point after the token, ready for the next call.
+ *
+ * It returns empty tokens, too, behaving exactly like the libc function
+ * of that name. In fact, it was stolen from glibc2 and de-fancy-fied.
+ * Same semantics, slimmer shape. ;)
+ */
+char *strsep(char **s, const char *ct)
+{
+ char *sbegin = *s;
+ char *end;
+
+ if (sbegin == NULL)
+ return NULL;
+
+ end = strpbrk(sbegin, ct);
+ if (end)
+ *end++ = '\0';
+ *s = end;
+ return sbegin;
+}
+EXPORT_SYMBOL(strsep);
+#endif
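+
+/*
+ * Illustrative sketch (not part of the upstream source): the usual strsep()
+ * tokenizing loop.  Note that strsep() writes into the string, so it must
+ * be modifiable (an array copy here, not a string literal).
+ */
+#if 0
+static void strsep_example(void)
+{
+ char buf[] = "one,two,,three";
+ char *s = buf;
+ char *tok;
+
+ while ((tok = strsep(&s, ",")) != NULL) {
+  /* yields "one", "two", "" (an empty token), then "three" */
+ }
+}
+#endif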
+
+#ifndef __HAVE_ARCH_MEMSET
+/**
+ * memset - Fill a region of memory with the given value
+ * @s: Pointer to the start of the area.
+ * @c: The byte to fill the area with
+ * @count: The size of the area.
+ *
+ * Do not use memset() to access IO space, use memset_io() instead.
+ */
+void *memset(void *s, int c, size_t count)
+{
+ char *xs = s;
+
+ while (count--)
+ *xs++ = c;
+ return s;
+}
+EXPORT_SYMBOL(memset);
+#endif
+
+#ifndef __HAVE_ARCH_MEMCPY
+/**
+ * memcpy - Copy one area of memory to another
+ * @dest: Where to copy to
+ * @src: Where to copy from
+ * @count: The size of the area.
+ *
+ * You should not use this function to access IO space, use memcpy_toio()
+ * or memcpy_fromio() instead.
+ */
+void *memcpy(void *dest, const void *src, size_t count)
+{
+ char *tmp = dest;
+ const char *s = src;
+
+ while (count--)
+ *tmp++ = *s++;
+ return dest;
+}
+EXPORT_SYMBOL(memcpy);
+#endif
+
+#ifndef __HAVE_ARCH_MEMMOVE
+/**
+ * memmove - Copy one area of memory to another
+ * @dest: Where to copy to
+ * @src: Where to copy from
+ * @count: The size of the area.
+ *
+ * Unlike memcpy(), memmove() copes with overlapping areas.
+ */
+void *memmove(void *dest, const void *src, size_t count)
+{
+ char *tmp;
+ const char *s;
+
+ if (dest <= src) {
+ tmp = dest;
+ s = src;
+ while (count--)
+ *tmp++ = *s++;
+ } else {
+ tmp = dest;
+ tmp += count;
+ s = src;
+ s += count;
+ while (count--)
+ *--tmp = *--s;
+ }
+ return dest;
+}
+EXPORT_SYMBOL(memmove);
+#endif
+
+#ifndef __HAVE_ARCH_MEMCMP
+/**
+ * memcmp - Compare two areas of memory
+ * @cs: One area of memory
+ * @ct: Another area of memory
+ * @count: The size of the area.
+ */
+#undef memcmp
+int memcmp(const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+ int res = 0;
+
+ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
+ if ((res = *su1 - *su2) != 0)
+ break;
+ return res;
+}
+EXPORT_SYMBOL(memcmp);
+#endif
+
+#ifndef __HAVE_ARCH_MEMSCAN
+/**
+ * memscan - Find a character in an area of memory.
+ * @addr: The memory area
+ * @c: The byte to search for
+ * @size: The size of the area.
+ *
+ * returns the address of the first occurrence of @c, or 1 byte past
+ * the area if @c is not found
+ */
+void *memscan(void *addr, int c, size_t size)
+{
+ unsigned char *p = addr;
+
+ while (size) {
+ if (*p == c)
+ return (void *)p;
+ p++;
+ size--;
+ }
+ return (void *)p;
+}
+EXPORT_SYMBOL(memscan);
+#endif
+
+#ifndef __HAVE_ARCH_STRSTR
+/**
+ * strstr - Find the first substring in a %NUL terminated string
+ * @s1: The string to be searched
+ * @s2: The string to search for
+ */
+char *strstr(const char *s1, const char *s2)
+{
+ int l1, l2;
+
+ l2 = strlen(s2);
+ if (!l2)
+ return (char *)s1;
+ l1 = strlen(s1);
+ while (l1 >= l2) {
+ l1--;
+ if (!memcmp(s1, s2, l2))
+ return (char *)s1;
+ s1++;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(strstr);
+#endif
+
+#ifndef __HAVE_ARCH_MEMCHR
+/**
+ * memchr - Find a character in an area of memory.
+ * @s: The memory area
+ * @c: The byte to search for
+ * @n: The size of the area.
+ *
+ * returns the address of the first occurrence of @c, or %NULL
+ * if @c is not found
+ */
+void *memchr(const void *s, int c, size_t n)
+{
+ const unsigned char *p = s;
+ while (n-- != 0) {
+ if ((unsigned char)c == *p++) {
+ return (void *)(p - 1);
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(memchr);
+#endif
+
+
+/**
+ * Converts an error code into a human-readable string.
+ */
+char *
+strerror(int errnum)
+{
+ if (errnum < 0)
+ errnum *= -1;
+
+ switch (errnum) {
+ case ENOMEM: return "Out of memory";
+ case EINVAL: return "Invalid argument";
+ }
+
+ return "unknown";
+}
+
--- /dev/null
+/*
+ * linux/lib/vsprintf.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */
+/*
+ * Wirzenius wrote this portably, Torvalds fucked it up :-)
+ */
+
+/*
+ * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
+ * - changed to provide snprintf and vsnprintf functions
+ * So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de>
+ * - scnprintf and vscnprintf
+ */
+
+#include <stdarg.h>
+#include <lwk/linux_compat.h>
+#include <lwk/types.h>
+#include <lwk/string.h>
+#include <lwk/ctype.h>
+#include <lwk/kernel.h>
+
+#include <arch/page.h> /* for PAGE_SIZE */
+#include <arch/div64.h>
+
+/**
+ * simple_strtoul - convert a string to an unsigned long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
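+ *
+ * If @base is 0 the base is inferred from the prefix. For example
+ * (illustrative), simple_strtoul("0x1a", NULL, 0) returns 26.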
+ */
+unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base)
+{
+ unsigned long result = 0,value;
+
+ if (!base) {
+ base = 10;
+ if (*cp == '0') {
+ base = 8;
+ cp++;
+ if ((toupper(*cp) == 'X') && isxdigit(cp[1])) {
+ cp++;
+ base = 16;
+ }
+ }
+ } else if (base == 16) {
+ if (cp[0] == '0' && toupper(cp[1]) == 'X')
+ cp += 2;
+ }
+ while (isxdigit(*cp) &&
+ (value = isdigit(*cp) ? *cp-'0' : toupper(*cp)-'A'+10) < base) {
+ result = result*base + value;
+ cp++;
+ }
+ if (endp)
+ *endp = (char *)cp;
+ return result;
+}
+
+EXPORT_SYMBOL(simple_strtoul);
+
+/**
+ * simple_strtol - convert a string to a signed long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+long simple_strtol(const char *cp,char **endp,unsigned int base)
+{
+ if(*cp=='-')
+ return -simple_strtoul(cp+1,endp,base);
+ return simple_strtoul(cp,endp,base);
+}
+
+EXPORT_SYMBOL(simple_strtol);
+
+/**
+ * simple_strtoull - convert a string to an unsigned long long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+unsigned long long simple_strtoull(const char *cp,char **endp,unsigned int base)
+{
+ unsigned long long result = 0,value;
+
+ if (!base) {
+ base = 10;
+ if (*cp == '0') {
+ base = 8;
+ cp++;
+ if ((toupper(*cp) == 'X') && isxdigit(cp[1])) {
+ cp++;
+ base = 16;
+ }
+ }
+ } else if (base == 16) {
+ if (cp[0] == '0' && toupper(cp[1]) == 'X')
+ cp += 2;
+ }
+ while (isxdigit(*cp) && (value = isdigit(*cp) ? *cp-'0' : (islower(*cp)
+ ? toupper(*cp) : *cp)-'A'+10) < base) {
+ result = result*base + value;
+ cp++;
+ }
+ if (endp)
+ *endp = (char *)cp;
+ return result;
+}
+
+EXPORT_SYMBOL(simple_strtoull);
+
+/**
+ * simple_strtoll - convert a string to a signed long long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+long long simple_strtoll(const char *cp,char **endp,unsigned int base)
+{
+ if(*cp=='-')
+ return -simple_strtoull(cp+1,endp,base);
+ return simple_strtoull(cp,endp,base);
+}
+
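+/* Parses a run of decimal digits at **s and advances *s past them. */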
+static int skip_atoi(const char **s)
+{
+ int i=0;
+
+ while (isdigit(**s))
+ i = i*10 + *((*s)++) - '0';
+ return i;
+}
+
+#define ZEROPAD 1 /* pad with zero */
+#define SIGN 2 /* unsigned/signed long */
+#define PLUS 4 /* show plus */
+#define SPACE 8 /* space if plus */
+#define LEFT 16 /* left justified */
+#define SPECIAL 32 /* 0x */
+#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
+
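+/*
+ * Helper for vsnprintf(): formats @num in the given @base into the output
+ * buffer, honoring the field width, precision and the ZEROPAD/SIGN/PLUS/
+ * SPACE/LEFT/SPECIAL/LARGE flags, and never writing past @end. Returns the
+ * advanced buffer pointer, which may point past @end if output was truncated.
+ */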
+static char * number(char * buf, char * end, unsigned long long num, int base, int size, int precision, int type)
+{
+ char c,sign,tmp[66];
+ const char *digits;
+ static const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+ static const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+ int i;
+
+ digits = (type & LARGE) ? large_digits : small_digits;
+ if (type & LEFT)
+ type &= ~ZEROPAD;
+ if (base < 2 || base > 36)
+ return NULL;
+ c = (type & ZEROPAD) ? '0' : ' ';
+ sign = 0;
+ if (type & SIGN) {
+ if ((signed long long) num < 0) {
+ sign = '-';
+ num = - (signed long long) num;
+ size--;
+ } else if (type & PLUS) {
+ sign = '+';
+ size--;
+ } else if (type & SPACE) {
+ sign = ' ';
+ size--;
+ }
+ }
+ if (type & SPECIAL) {
+ if (base == 16)
+ size -= 2;
+ else if (base == 8)
+ size--;
+ }
+ i = 0;
+ if (num == 0)
+ tmp[i++]='0';
+ else while (num != 0)
+ tmp[i++] = digits[do_div(num,base)];
+ if (i > precision)
+ precision = i;
+ size -= precision;
+ if (!(type&(ZEROPAD+LEFT))) {
+ while(size-->0) {
+ if (buf <= end)
+ *buf = ' ';
+ ++buf;
+ }
+ }
+ if (sign) {
+ if (buf <= end)
+ *buf = sign;
+ ++buf;
+ }
+ if (type & SPECIAL) {
+ if (base==8) {
+ if (buf <= end)
+ *buf = '0';
+ ++buf;
+ } else if (base==16) {
+ if (buf <= end)
+ *buf = '0';
+ ++buf;
+ if (buf <= end)
+ *buf = digits[33];
+ ++buf;
+ }
+ }
+ if (!(type & LEFT)) {
+ while (size-- > 0) {
+ if (buf <= end)
+ *buf = c;
+ ++buf;
+ }
+ }
+ while (i < precision--) {
+ if (buf <= end)
+ *buf = '0';
+ ++buf;
+ }
+ while (i-- > 0) {
+ if (buf <= end)
+ *buf = tmp[i];
+ ++buf;
+ }
+ while (size-- > 0) {
+ if (buf <= end)
+ *buf = ' ';
+ ++buf;
+ }
+ return buf;
+}
+
+/**
+ * vsnprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @size: The size of the buffer, including the trailing null space
+ * @fmt: The format string to use
+ * @args: Arguments for the format string
+ *
+ * The return value is the number of characters which would
+ * be generated for the given input, excluding the trailing
+ * '\0', as per ISO C99. If you want to have the exact
+ * number of characters written into @buf as return value
+ * (not including the trailing '\0'), use vscnprintf. If the
+ * return is greater than or equal to @size, the resulting
+ * string is truncated.
+ *
+ * Call this function if you are already dealing with a va_list.
+ * You probably want snprintf instead.
+ */
+int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+ int len;
+ unsigned long long num;
+ int i, base;
+ char *str, *end, c;
+ const char *s;
+
+ int flags; /* flags to number() */
+
+ int field_width; /* width of output field */
+ int precision; /* min. # of digits for integers; max
+ number of chars from string */
+ int qualifier; /* 'h', 'l', or 'L' for integer fields */
+ /* 'z' support added 23/7/1999 S.H. */
+ /* 'z' changed to 'Z' --davidm 1/25/99 */
+ /* 't' added for ptrdiff_t */
+
+ /* Reject out-of-range values early */
+ if (unlikely((int) size < 0)) {
+ /* There can be only one.. */
+ static int warn = 1;
+ WARN_ON(warn);
+ warn = 0;
+ return 0;
+ }
+
+ str = buf;
+ end = buf + size - 1;
+
+ if (end < buf - 1) {
+ end = ((void *) -1);
+ size = end - buf + 1;
+ }
+
+ for (; *fmt ; ++fmt) {
+ if (*fmt != '%') {
+ if (str <= end)
+ *str = *fmt;
+ ++str;
+ continue;
+ }
+
+ /* process flags */
+ flags = 0;
+ repeat:
+ ++fmt; /* this also skips first '%' */
+ switch (*fmt) {
+ case '-': flags |= LEFT; goto repeat;
+ case '+': flags |= PLUS; goto repeat;
+ case ' ': flags |= SPACE; goto repeat;
+ case '#': flags |= SPECIAL; goto repeat;
+ case '0': flags |= ZEROPAD; goto repeat;
+ }
+
+ /* get field width */
+ field_width = -1;
+ if (isdigit(*fmt))
+ field_width = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ field_width = va_arg(args, int);
+ if (field_width < 0) {
+ field_width = -field_width;
+ flags |= LEFT;
+ }
+ }
+
+ /* get the precision */
+ precision = -1;
+ if (*fmt == '.') {
+ ++fmt;
+ if (isdigit(*fmt))
+ precision = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ precision = va_arg(args, int);
+ }
+ if (precision < 0)
+ precision = 0;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
+ *fmt =='Z' || *fmt == 'z' || *fmt == 't') {
+ qualifier = *fmt;
+ ++fmt;
+ if (qualifier == 'l' && *fmt == 'l') {
+ qualifier = 'L';
+ ++fmt;
+ }
+ }
+
+ /* default base */
+ base = 10;
+
+ switch (*fmt) {
+ case 'c':
+ if (!(flags & LEFT)) {
+ while (--field_width > 0) {
+ if (str <= end)
+ *str = ' ';
+ ++str;
+ }
+ }
+ c = (unsigned char) va_arg(args, int);
+ if (str <= end)
+ *str = c;
+ ++str;
+ while (--field_width > 0) {
+ if (str <= end)
+ *str = ' ';
+ ++str;
+ }
+ continue;
+
+ case 's':
+ s = va_arg(args, char *);
+ if ((unsigned long)s < PAGE_SIZE)
+ s = "<NULL>";
+
+ len = strnlen(s, precision);
+
+ if (!(flags & LEFT)) {
+ while (len < field_width--) {
+ if (str <= end)
+ *str = ' ';
+ ++str;
+ }
+ }
+ for (i = 0; i < len; ++i) {
+ if (str <= end)
+ *str = *s;
+ ++str; ++s;
+ }
+ while (len < field_width--) {
+ if (str <= end)
+ *str = ' ';
+ ++str;
+ }
+ continue;
+
+ case 'p':
+ if (field_width == -1) {
+ field_width = 2*sizeof(void *);
+ flags |= ZEROPAD;
+ }
+ str = number(str, end,
+ (unsigned long) va_arg(args, void *),
+ 16, field_width, precision, flags);
+ continue;
+
+
+ case 'n':
+ /* FIXME:
+ * What does C99 say about the overflow case here? */
+ if (qualifier == 'l') {
+ long * ip = va_arg(args, long *);
+ *ip = (str - buf);
+ } else if (qualifier == 'Z' || qualifier == 'z') {
+ size_t * ip = va_arg(args, size_t *);
+ *ip = (str - buf);
+ } else {
+ int * ip = va_arg(args, int *);
+ *ip = (str - buf);
+ }
+ continue;
+
+ case '%':
+ if (str <= end)
+ *str = '%';
+ ++str;
+ continue;
+
+ /* integer number formats - set up the flags and "break" */
+ case 'o':
+ base = 8;
+ break;
+
+ case 'X':
+ flags |= LARGE;
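+ /* fall through */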
+ case 'x':
+ base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+ flags |= SIGN;
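+ /* fall through */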
+ case 'u':
+ break;
+
+ default:
+ if (str <= end)
+ *str = '%';
+ ++str;
+ if (*fmt) {
+ if (str <= end)
+ *str = *fmt;
+ ++str;
+ } else {
+ --fmt;
+ }
+ continue;
+ }
+ if (qualifier == 'L')
+ num = va_arg(args, long long);
+ else if (qualifier == 'l') {
+ num = va_arg(args, unsigned long);
+ if (flags & SIGN)
+ num = (signed long) num;
+ } else if (qualifier == 'Z' || qualifier == 'z') {
+ num = va_arg(args, size_t);
+ } else if (qualifier == 't') {
+ num = va_arg(args, ptrdiff_t);
+ } else if (qualifier == 'h') {
+ num = (unsigned short) va_arg(args, int);
+ if (flags & SIGN)
+ num = (signed short) num;
+ } else {
+ num = va_arg(args, unsigned int);
+ if (flags & SIGN)
+ num = (signed int) num;
+ }
+ str = number(str, end, num, base,
+ field_width, precision, flags);
+ }
+ if (str <= end)
+ *str = '\0';
+ else if (size > 0)
+ /* don't write out a null byte if the buf size is zero */
+ *end = '\0';
+ /* the trailing null byte doesn't count towards the total
+ * ++str;
+ */
+ return str-buf;
+}
+
+EXPORT_SYMBOL(vsnprintf);
+
+/**
+ * vscnprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @size: The size of the buffer, including the trailing null space
+ * @fmt: The format string to use
+ * @args: Arguments for the format string
+ *
+ * The return value is the number of characters which have been written into
+ * the @buf not including the trailing '\0'. If @size is <= 0 the function
+ * returns 0.
+ *
+ * Call this function if you are already dealing with a va_list.
+ * You probably want scnprintf instead.
+ */
+int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+ int i;
+
+ i=vsnprintf(buf,size,fmt,args);
+ return (i >= size) ? (size - 1) : i;
+}
+
+EXPORT_SYMBOL(vscnprintf);
+
+/**
+ * snprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @size: The size of the buffer, including the trailing null space
+ * @fmt: The format string to use
+ * @...: Arguments for the format string
+ *
+ * The return value is the number of characters which would be
+ * generated for the given input, excluding the trailing null,
+ * as per ISO C99. If the return is greater than or equal to
+ * @size, the resulting string is truncated.
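+ *
+ * For example (illustrative), snprintf(buf, 4, "%lu", 12345UL) returns 5
+ * and leaves "123" plus the terminating '\0' in buf.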
+ */
+int snprintf(char * buf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i=vsnprintf(buf,size,fmt,args);
+ va_end(args);
+ return i;
+}
+
+EXPORT_SYMBOL(snprintf);
+
+/**
+ * scnprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @size: The size of the buffer, including the trailing null space
+ * @fmt: The format string to use
+ * @...: Arguments for the format string
+ *
+ * The return value is the number of characters written into @buf not including
+ * the trailing '\0'. If @size is <= 0 the function returns 0. If the return is
+ * greater than or equal to @size, the resulting string is truncated.
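+ *
+ * For example (illustrative), scnprintf(buf, 4, "%lu", 12345UL) returns 3,
+ * whereas snprintf() would return 5 for the same call.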
+ */
+
+int scnprintf(char * buf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vsnprintf(buf, size, fmt, args);
+ va_end(args);
+ return (i >= size) ? (size - 1) : i;
+}
+EXPORT_SYMBOL(scnprintf);
+
+/**
+ * vsprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @fmt: The format string to use
+ * @args: Arguments for the format string
+ *
+ * The function returns the number of characters written
+ * into @buf. Use vsnprintf or vscnprintf in order to avoid
+ * buffer overflows.
+ *
+ * Call this function if you are already dealing with a va_list.
+ * You probably want sprintf instead.
+ */
+int vsprintf(char *buf, const char *fmt, va_list args)
+{
+ return vsnprintf(buf, INT_MAX, fmt, args);
+}
+
+EXPORT_SYMBOL(vsprintf);
+
+/**
+ * sprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @fmt: The format string to use
+ * @...: Arguments for the format string
+ *
+ * The function returns the number of characters written
+ * into @buf. Use snprintf or scnprintf in order to avoid
+ * buffer overflows.
+ */
+int sprintf(char * buf, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i=vsnprintf(buf, INT_MAX, fmt, args);
+ va_end(args);
+ return i;
+}
+
+EXPORT_SYMBOL(sprintf);
+
+/**
+ * vsscanf - Unformat a buffer into a list of arguments
+ * @buf: input buffer
+ * @fmt: format of buffer
+ * @args: arguments
+ */
+int vsscanf(const char * buf, const char * fmt, va_list args)
+{
+ const char *str = buf;
+ char *next;
+ char digit;
+ int num = 0;
+ int qualifier;
+ int base;
+ int field_width;
+ int is_sign = 0;
+
+ while(*fmt && *str) {
+ /* skip any white space in format */
+ /* white space in format matches any amount of
+ * white space, including none, in the input.
+ */
+ if (isspace(*fmt)) {
+ while (isspace(*fmt))
+ ++fmt;
+ while (isspace(*str))
+ ++str;
+ }
+
+ /* anything that is not a conversion must match exactly */
+ if (*fmt != '%' && *fmt) {
+ if (*fmt++ != *str++)
+ break;
+ continue;
+ }
+
+ if (!*fmt)
+ break;
+ ++fmt;
+
+ /* skip this conversion.
+ * advance both strings to next white space
+ */
+ if (*fmt == '*') {
+ while (!isspace(*fmt) && *fmt)
+ fmt++;
+ while (!isspace(*str) && *str)
+ str++;
+ continue;
+ }
+
+ /* get field width */
+ field_width = -1;
+ if (isdigit(*fmt))
+ field_width = skip_atoi(&fmt);
+
+ /* get conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
+ *fmt == 'Z' || *fmt == 'z') {
+ qualifier = *fmt++;
+ if (unlikely(qualifier == *fmt)) {
+ if (qualifier == 'h') {
+ qualifier = 'H';
+ fmt++;
+ } else if (qualifier == 'l') {
+ qualifier = 'L';
+ fmt++;
+ }
+ }
+ }
+ base = 10;
+ is_sign = 0;
+
+ if (!*fmt || !*str)
+ break;
+
+ switch(*fmt++) {
+ case 'c':
+ {
+ char *s = (char *) va_arg(args,char*);
+ if (field_width == -1)
+ field_width = 1;
+ do {
+ *s++ = *str++;
+ } while (--field_width > 0 && *str);
+ num++;
+ }
+ continue;
+ case 's':
+ {
+ char *s = (char *) va_arg(args, char *);
+ if(field_width == -1)
+ field_width = INT_MAX;
+ /* first, skip leading white space in buffer */
+ while (isspace(*str))
+ str++;
+
+ /* now copy until next white space */
+ while (*str && !isspace(*str) && field_width--) {
+ *s++ = *str++;
+ }
+ *s = '\0';
+ num++;
+ }
+ continue;
+ case 'n':
+ /* return number of characters read so far */
+ {
+ int *i = (int *)va_arg(args,int*);
+ *i = str - buf;
+ }
+ continue;
+ case 'o':
+ base = 8;
+ break;
+ case 'x':
+ case 'X':
+ base = 16;
+ break;
+ case 'i':
+ base = 0;
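+ /* fall through */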
+ case 'd':
+ is_sign = 1;
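+ /* fall through */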
+ case 'u':
+ break;
+ case '%':
+ /* looking for '%' in str */
+ if (*str++ != '%')
+ return num;
+ continue;
+ default:
+ /* invalid format; stop here */
+ return num;
+ }
+
+ /* have some sort of integer conversion.
+ * first, skip white space in buffer.
+ */
+ while (isspace(*str))
+ str++;
+
+ digit = *str;
+ if (is_sign && digit == '-')
+ digit = *(str + 1);
+
+ if (!digit
+ || (base == 16 && !isxdigit(digit))
+ || (base == 10 && !isdigit(digit))
+ || (base == 8 && (!isdigit(digit) || digit > '7'))
+ || (base == 0 && !isdigit(digit)))
+ break;
+
+ switch(qualifier) {
+ case 'H': /* that's 'hh' in format */
+ if (is_sign) {
+ signed char *s = (signed char *) va_arg(args,signed char *);
+ *s = (signed char) simple_strtol(str,&next,base);
+ } else {
+ unsigned char *s = (unsigned char *) va_arg(args, unsigned char *);
+ *s = (unsigned char) simple_strtoul(str, &next, base);
+ }
+ break;
+ case 'h':
+ if (is_sign) {
+ short *s = (short *) va_arg(args,short *);
+ *s = (short) simple_strtol(str,&next,base);
+ } else {
+ unsigned short *s = (unsigned short *) va_arg(args, unsigned short *);
+ *s = (unsigned short) simple_strtoul(str, &next, base);
+ }
+ break;
+ case 'l':
+ if (is_sign) {
+ long *l = (long *) va_arg(args,long *);
+ *l = simple_strtol(str,&next,base);
+ } else {
+ unsigned long *l = (unsigned long*) va_arg(args,unsigned long*);
+ *l = simple_strtoul(str,&next,base);
+ }
+ break;
+ case 'L':
+ if (is_sign) {
+ long long *l = (long long*) va_arg(args,long long *);
+ *l = simple_strtoll(str,&next,base);
+ } else {
+ unsigned long long *l = (unsigned long long*) va_arg(args,unsigned long long*);
+ *l = simple_strtoull(str,&next,base);
+ }
+ break;
+ case 'Z':
+ case 'z':
+ {
+ size_t *s = (size_t*) va_arg(args,size_t*);
+ *s = (size_t) simple_strtoul(str,&next,base);
+ }
+ break;
+ default:
+ if (is_sign) {
+ int *i = (int *) va_arg(args, int*);
+ *i = (int) simple_strtol(str,&next,base);
+ } else {
+ unsigned int *i = (unsigned int*) va_arg(args, unsigned int*);
+ *i = (unsigned int) simple_strtoul(str,&next,base);
+ }
+ break;
+ }
+ num++;
+
+ if (!next)
+ break;
+ str = next;
+ }
+ return num;
+}
+
+EXPORT_SYMBOL(vsscanf);
+
+/**
+ * sscanf - Unformat a buffer into a list of arguments
+ * @buf: input buffer
+ * @fmt: formatting of buffer
+ * @...: resulting arguments
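+ *
+ * For example (illustrative), sscanf("10 abc", "%d %s", &i, name) returns 2
+ * with i == 10 and name == "abc".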
+ */
+int sscanf(const char * buf, const char * fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args,fmt);
+ i = vsscanf(buf,fmt,args);
+ va_end(args);
+ return i;
+}
+
+EXPORT_SYMBOL(sscanf);
--- /dev/null
+obj-y := bootmem.o buddy.o kmem.o aspace.o pmem.o pmem_liblwk.o \
+ aspace_liblwk.o
--- /dev/null
+/* Copyright (c) 2007,2008 Sandia National Laboratories */
+
+#include <lwk/kernel.h>
+#include <lwk/task.h>
+#include <lwk/spinlock.h>
+#include <lwk/string.h>
+#include <lwk/aspace.h>
+#include <lwk/idspace.h>
+#include <lwk/htable.h>
+#include <lwk/log2.h>
+#include <lwk/cpuinfo.h>
+#include <lwk/pmem.h>
+#include <arch/uaccess.h>
+
+/**
+ * ID space used to allocate address space IDs.
+ */
+static idspace_t idspace;
+
+/**
+ * Hash table used to lookup address space structures by ID.
+ */
+static htable_t htable;
+
+/**
+ * Lock for serializing access to the htable.
+ */
+static DEFINE_SPINLOCK(htable_lock);
+
+/**
+ * Memory region structure. A memory region represents a contiguous region
+ * [start, end) of valid memory addresses in an address space.
+ */
+struct region {
+ struct aspace * aspace; /* Address space this region belongs to */
+ struct list_head link; /* Linkage in the aspace->region_list */
+
+ vaddr_t start; /* Starting address of the region */
+ vaddr_t end; /* 1st byte after end of the region */
+ vmflags_t flags; /* Permissions, caching, etc. */
+ vmpagesize_t pagesz; /* Allowed page sizes... 2^bit */
+ id_t smartmap; /* If (flags & VM_SMARTMAP), ID of the
+ aspace this region is mapped to */
+ char name[16]; /* Human-readable name of the region */
+};
+
+/**
+ * This calculates a region's end address. Normally end is the address of the
+ * first byte after the region. However, if the region extends to the end of
+ * memory that is not possible, so end is set to the last valid address,
+ * ULONG_MAX.
+ */
+static vaddr_t
+calc_end(vaddr_t start, size_t extent)
+{
+ vaddr_t end = start + extent;
+ if (end == 0)
+ end = ULONG_MAX;
+ return end;
+}
+
+/**
+ * Locates the region covering the specified address.
+ */
+static struct region *
+find_region(struct aspace *aspace, vaddr_t addr)
+{
+ struct region *rgn;
+
+ list_for_each_entry(rgn, &aspace->region_list, link) {
+ if ((rgn->start <= addr) && (rgn->end > addr))
+ return rgn;
+ }
+ return NULL;
+}
+
+/**
+ * Finds a region that overlaps the specified interval.
+ */
+static struct region *
+find_overlapping_region(struct aspace *aspace, vaddr_t start, vaddr_t end)
+{
+ struct region *rgn;
+
+ list_for_each_entry(rgn, &aspace->region_list, link) {
+ if ((start < rgn->end) && (end > rgn->start))
+ return rgn;
+ }
+ return NULL;
+}
+
+/**
+ * Locates the region that is SMARTMAP'ed to the specified aspace ID.
+ */
+static struct region *
+find_smartmap_region(struct aspace *aspace, id_t src_aspace)
+{
+ struct region *rgn;
+
+ list_for_each_entry(rgn, &aspace->region_list, link) {
+ if ((rgn->flags & VM_SMARTMAP) && (rgn->smartmap == src_aspace))
+ return rgn;
+ }
+ return NULL;
+}
+
+/**
+ * Looks up an aspace object by ID and returns it with its spinlock locked.
+ */
+static struct aspace *
+lookup_and_lock(id_t id)
+{
+ struct aspace *aspace;
+
+ /* Lock the hash table, lookup aspace object by ID */
+ spin_lock(&htable_lock);
+ if ((aspace = htable_lookup(htable, id)) == NULL) {
+ spin_unlock(&htable_lock);
+ return NULL;
+ }
+
+ /* Lock the identified aspace */
+ spin_lock(&aspace->lock);
+
+ /* Unlock the hash table, others may now use it */
+ spin_unlock(&htable_lock);
+
+ return aspace;
+}
+
+/**
+ * Like lookup_and_lock(), but looks up two address spaces instead of one.
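+ *
+ * Both aspace locks are acquired while htable_lock is held, so concurrent
+ * callers with swapped IDs cannot deadlock on lock ordering.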
+ */
+static int
+lookup_and_lock_two(id_t a, id_t b,
+ struct aspace **aspace_a, struct aspace **aspace_b)
+{
+ /* Lock the hash table, lookup aspace objects by ID */
+ spin_lock(&htable_lock);
+ if ((*aspace_a = htable_lookup(htable, a)) == NULL) {
+ spin_unlock(&htable_lock);
+ return -ENOENT;
+ }
+
+ if ((*aspace_b = htable_lookup(htable, b)) == NULL) {
+ spin_unlock(&htable_lock);
+ return -ENOENT;
+ }
+
+ /* Lock the identified aspaces */
+ spin_lock(&(*aspace_a)->lock);
+ spin_lock(&(*aspace_b)->lock);
+
+ /* Unlock the hash table, others may now use it */
+ spin_unlock(&htable_lock);
+
+ return 0;
+}
+
+static bool
+id_ok(id_t id)
+{
+ return ((id >= ASPACE_MIN_ID) && (id <= ASPACE_MAX_ID));
+}
+
+int __init
+aspace_subsys_init(void)
+{
+ int status;
+
+ /* Create an ID space for allocating address space IDs */
+ if ((status = idspace_create(__ASPACE_MIN_ID, __ASPACE_MAX_ID, &idspace)))
+ panic("Failed to create aspace ID space (status=%d).", status);
+
+ /* Create a hash table that will be used for quick ID->aspace lookups */
+ if ((status = htable_create(7 /* 2^7 bins */,
+ offsetof(struct aspace, id),
+ offsetof(struct aspace, ht_link),
+ &htable)))
+ panic("Failed to create aspace hash table (status=%d).", status);
+
+ /* Create an aspace for use by kernel threads */
+ if ((status = aspace_create(KERNEL_ASPACE_ID, "kernel", NULL)))
+ panic("Failed to create kernel aspace (status=%d).", status);
+
+ /* Switch to the newly created kernel address space */
+ if ((current->aspace = aspace_acquire(KERNEL_ASPACE_ID)) == NULL)
+ panic("Failed to acquire kernel aspace.");
+ arch_aspace_activate(current->aspace);
+
+ return 0;
+}
+
+int
+aspace_get_myid(id_t *id)
+{
+ *id = current->aspace->id;
+ return 0;
+}
+
+int
+sys_aspace_get_myid(id_t __user *id)
+{
+ int status;
+ id_t _id;
+
+ if ((status = aspace_get_myid(&_id)) != 0)
+ return status;
+
+ if (id && copy_to_user(id, &_id, sizeof(*id)))
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+aspace_create(id_t id_request, const char *name, id_t *id)
+{
+ int status;
+ id_t new_id;
+ struct aspace *aspace;
+ unsigned long flags;
+
+ if ((status = idspace_alloc_id(idspace, id_request, &new_id)) != 0)
+ return status;
+
+ if ((aspace = kmem_alloc(sizeof(*aspace))) == NULL) {
+ idspace_free_id(idspace, new_id);
+ return -ENOMEM;
+ }
+
+ /*
+ * Initialize the address space. kmem_alloc() allocates zeroed memory
+ * so fields with an initial state of zero do not need to be explicitly
+ * initialized.
+ */
+ aspace->id = new_id;
+ spin_lock_init(&aspace->lock);
+ list_head_init(&aspace->region_list);
+ hlist_node_init(&aspace->ht_link);
+ if (name)
+ strlcpy(aspace->name, name, sizeof(aspace->name));
+
+ /* Create a region for the kernel portion of the address space */
+ status =
+ __aspace_add_region(
+ aspace,
+ PAGE_OFFSET,
+ ULONG_MAX-PAGE_OFFSET+1, /* # bytes to end of memory */
+ VM_KERNEL,
+ PAGE_SIZE,
+ "kernel"
+ );
+ if (status)
+ goto error1;
+
+ /* Do architecture-specific initialization */
+ if ((status = arch_aspace_create(aspace)) != 0)
+ goto error2;
+
+ /* Add new address space to a hash table, for quick lookups by ID */
+ spin_lock_irqsave(&htable_lock, flags);
+ BUG_ON(htable_add(htable, aspace));
+ spin_unlock_irqrestore(&htable_lock, flags);
+
+ if (id)
+ *id = new_id;
+ return 0;
+
+error2:
+ BUG_ON(__aspace_del_region(aspace,PAGE_OFFSET,ULONG_MAX-PAGE_OFFSET+1));
+error1:
+ idspace_free_id(idspace, aspace->id);
+ kmem_free(aspace);
+ return status;
+}
+
+int
+sys_aspace_create(id_t id_request, const char __user *name, id_t __user *id)
+{
+ int status;
+ char _name[16];
+ id_t _id;
+
+ if (current->uid != 0)
+ return -EPERM;
+
+ if ((id_request != ANY_ID) && !id_ok(id_request))
+ return -EINVAL;
+
+ if (strncpy_from_user(_name, name, sizeof(_name)) < 0)
+ return -EFAULT;
+ _name[sizeof(_name) - 1] = '\0';
+
+ if ((status = aspace_create(id_request, _name, &_id)) != 0)
+ return status;
+
+ BUG_ON(!id_ok(_id));
+
+ if (id && copy_to_user(id, &_id, sizeof(*id)))
+ return -EFAULT;
+
+ return 0;
+}
+
+int
+aspace_destroy(id_t id)
+{
+ struct aspace *aspace;
+ struct list_head *pos, *tmp;
+ struct region *rgn;
+ unsigned long irqstate;
+
+ /* Lock the hash table, lookup aspace object by ID */
+ spin_lock_irqsave(&htable_lock, irqstate);
+ if ((aspace = htable_lookup(htable, id)) == NULL) {
+ spin_unlock_irqrestore(&htable_lock, irqstate);
+ return -EINVAL;
+ }
+
+ /* Lock the identified aspace */
+ spin_lock(&aspace->lock);
+
+ if (aspace->refcnt) {
+ spin_unlock(&aspace->lock);
+ spin_unlock_irqrestore(&htable_lock, irqstate);
+ return -EBUSY;
+ }
+
+ /* Remove aspace from hash table, preventing others from finding it */
+ BUG_ON(htable_del(htable, aspace));
+
+ /* Unlock the hash table, others may now use it */
+ spin_unlock_irqrestore(&htable_lock, irqstate);
+ spin_unlock(&aspace->lock);
+
+ /* Finish up destroying the aspace, we have the only reference */
+ list_for_each_safe(pos, tmp, &aspace->region_list) {
+ rgn = list_entry(pos, struct region, link);
+ /* Must drop our reference on all SMARTMAP'ed aspaces */
+ if (rgn->flags & VM_SMARTMAP) {
+ struct aspace *src;
+ spin_lock_irqsave(&htable_lock, irqstate);
+ src = htable_lookup(htable, rgn->smartmap);
+ BUG_ON(src == NULL);
+ spin_lock(&src->lock);
+ --src->refcnt;
+ spin_unlock(&src->lock);
+ spin_unlock_irqrestore(&htable_lock, irqstate);
+ }
+ list_del(&rgn->link);
+ kmem_free(rgn);
+ }
+ arch_aspace_destroy(aspace);
+ BUG_ON(idspace_free_id(idspace, aspace->id));
+ kmem_free(aspace);
+ return 0;
+}
+
+int
+sys_aspace_destroy(id_t id)
+{
+ if (current->uid != 0)
+ return -EPERM;
+ if (!id_ok(id))
+ return -EINVAL;
+ return aspace_destroy(id);
+}
+
+/**
+ * Acquires an address space object. The object is guaranteed not to be
+ * deleted until it is released via aspace_release().
+ */
+struct aspace *
+aspace_acquire(id_t id)
+{
+ struct aspace *aspace;
+ unsigned long irqstate;
+
+ local_irq_save(irqstate);
+ if ((aspace = lookup_and_lock(id)) != NULL) {
+ ++aspace->refcnt;
+ spin_unlock(&aspace->lock);
+ }
+ local_irq_restore(irqstate);
+ return aspace;
+}
+
+/**
+ * Releases an aspace object that was previously acquired via aspace_acquire().
+ * The aspace object passed in must be unlocked.
+ */
+void
+aspace_release(struct aspace *aspace)
+{
+ unsigned long irqstate;
+ spin_lock_irqsave(&aspace->lock, irqstate);
+ --aspace->refcnt;
+ spin_unlock_irqrestore(&aspace->lock, irqstate);
+}
+
+int
+__aspace_find_hole(struct aspace *aspace,
+ vaddr_t start_hint, size_t extent, size_t alignment,
+ vaddr_t *start)
+{
+ struct region *rgn;
+ vaddr_t hole;
+
+ if (!aspace || !extent || !is_power_of_2(alignment))
+ return -EINVAL;
+
+ if (start_hint == 0)
+ start_hint = 1;
+
+ hole = round_up(start_hint, alignment);
+ while ((rgn = find_overlapping_region(aspace, hole, hole + extent))) {
+ if (rgn->end == ULONG_MAX)
+ return -ENOENT;
+ hole = round_up(rgn->end, alignment);
+ }
+
+ if (start)
+ *start = hole;
+ return 0;
+}
+
+int
+aspace_find_hole(id_t id,
+ vaddr_t start_hint, size_t extent, size_t alignment,
+ vaddr_t *start)
+{
+ int status;
+ struct aspace *aspace;
+ unsigned long irqstate;
+
+ local_irq_save(irqstate);
+ aspace = lookup_and_lock(id);
+ status = __aspace_find_hole(aspace, start_hint, extent, alignment,
+ start);
+ if (aspace) spin_unlock(&aspace->lock);
+ local_irq_restore(irqstate);
+ return status;
+}
+
+int
+sys_aspace_find_hole(id_t id,
+ vaddr_t start_hint, size_t extent, size_t alignment,
+ vaddr_t __user *start)
+{
+ vaddr_t _start;
+ int status;
+
+ if (current->uid != 0)
+ return -EPERM;
+
+ if (!id_ok(id))
+ return -EINVAL;
+
+ status = aspace_find_hole(id, start_hint, extent, alignment, &_start);
+ if (status)
+ return status;
+
+ if (start && copy_to_user(start, &_start, sizeof(_start)))
+ return -EFAULT;
+
+ return 0;
+}
+
+int
+__aspace_add_region(struct aspace *aspace,
+ vaddr_t start, size_t extent,
+ vmflags_t flags, vmpagesize_t pagesz,
+ const char *name)
+{
+ struct region *rgn;
+ struct region *cur;
+ struct list_head *pos;
+ vaddr_t end = calc_end(start, extent);
+
+ if (!aspace || !start)
+ return -EINVAL;
+
+ /* Region must have non-zero size */
+ if (extent == 0) {
+ printk(KERN_WARNING "Extent must be non-zero.\n");
+ return -EINVAL;
+ }
+
+ /* Region must have a positive size */
+ if (start >= end) {
+ printk(KERN_WARNING
+ "Invalid region size (start=0x%lx, extent=0x%lx).\n",
+ start, extent);
+ return -EINVAL;
+ }
+
+ /* Architecture must support the page size specified */
+ if ((pagesz & cpu_info[0].pagesz_mask) == 0) {
+ printk(KERN_WARNING
+ "Invalid page size specified (pagesz=0x%lx).\n",
+ pagesz);
+ return -EINVAL;
+ }
+ pagesz &= cpu_info[0].pagesz_mask;
+
+ /* Only one page size may be specified */
+ if (!is_power_of_2(pagesz)) {
+ printk(KERN_WARNING
+ "More than one page size specified (pagesz=0x%lx).\n",
+ pagesz);
+ return -EINVAL;
+ }
+
+ /* Region must be aligned to at least the specified page size */
+ if ((start & (pagesz-1)) || ((end!=ULONG_MAX) && (end & (pagesz-1)))) {
+ printk(KERN_WARNING
+ "Region is misaligned (start=0x%lx, end=0x%lx).\n",
+ start, end);
+ return -EINVAL;
+ }
+
+ /* Region must not overlap with any existing regions */
+ list_for_each_entry(cur, &aspace->region_list, link) {
+ if ((start < cur->end) && (end > cur->start)) {
+ printk(KERN_WARNING
+ "Region overlaps with existing region.\n");
+ return -ENOTUNIQ;
+ }
+ }
+
+ /* Allocate and initialize a new region object */
+ if ((rgn = kmem_alloc(sizeof(struct region))) == NULL)
+ return -ENOMEM;
+
+ rgn->aspace = aspace;
+ rgn->start = start;
+ rgn->end = end;
+ rgn->flags = flags;
+ rgn->pagesz = pagesz;
+ if (name)
+ strlcpy(rgn->name, name, sizeof(rgn->name));
+
+ /* The heap region is special, remember its bounds */
+ if (flags & VM_HEAP) {
+ aspace->heap_start = start;
+ aspace->heap_end = end;
+ aspace->brk = aspace->heap_start;
+ aspace->mmap_brk = aspace->heap_end;
+ }
+
+ /* Insert region into address space's sorted region list */
+ list_for_each(pos, &aspace->region_list) {
+ cur = list_entry(pos, struct region, link);
+ if (cur->start > rgn->start)
+ break;
+ }
+ list_add_tail(&rgn->link, pos);
+ return 0;
+}
+
+int
+aspace_add_region(id_t id,
+ vaddr_t start, size_t extent,
+ vmflags_t flags, vmpagesize_t pagesz,
+ const char *name)
+{
+ int status;
+ struct aspace *aspace;
+ unsigned long irqstate;
+
+ local_irq_save(irqstate);
+ aspace = lookup_and_lock(id);
+ status = __aspace_add_region(aspace, start, extent, flags, pagesz, name);
+ if (aspace) spin_unlock(&aspace->lock);
+ local_irq_restore(irqstate);
+ return status;
+}
+
+int
+sys_aspace_add_region(id_t id,
+ vaddr_t start, size_t extent,
+ vmflags_t flags, vmpagesize_t pagesz,
+ const char __user *name)
+{
+ char _name[16];
+
+ if (current->uid != 0)
+ return -EPERM;
+
+ if (!id_ok(id))
+ return -EINVAL;
+
+ if (strncpy_from_user(_name, name, sizeof(_name)) < 0)
+ return -EFAULT;
+ _name[sizeof(_name) - 1] = '\0';
+
+ return aspace_add_region(id, start, extent, flags, pagesz, _name);
+}
+
+
+int
+__aspace_del_region(struct aspace *aspace, vaddr_t start, size_t extent)
+{
+ int status;
+ struct region *rgn;
+ vaddr_t end = calc_end(start, extent);
+
+ if (!aspace)
+ return -EINVAL;
+
+ /* Locate the region to delete */
+ rgn = find_region(aspace, start);
+ if (!rgn || (rgn->start != start) || (rgn->end != end)
+ || (rgn->flags & VM_KERNEL))
+ return -EINVAL;
+
+ if (!(rgn->flags & VM_SMARTMAP)) {
+ /* Unmap all of the memory that was mapped to the region */
+ status = __aspace_unmap_pmem(aspace, start, extent);
+ if (status)
+ return status;
+ }
+
+ /* Remove the region from the address space */
+ list_del(&rgn->link);
+ kmem_free(rgn);
+ return 0;
+}
+
+int
+aspace_del_region(id_t id, vaddr_t start, size_t extent)
+{
+ int status;
+ struct aspace *aspace;
+ unsigned long irqstate;
+
+ local_irq_save(irqstate);
+ aspace = lookup_and_lock(id);
+ status = __aspace_del_region(aspace, start, extent);
+ if (aspace) spin_unlock(&aspace->lock);
+ local_irq_restore(irqstate);
+ return status;
+}
+
+int
+sys_aspace_del_region(id_t id, vaddr_t start, size_t extent)
+{
+ if (current->uid != 0)
+ return -EPERM;
+ if (!id_ok(id))
+ return -EINVAL;
+ return aspace_del_region(id, start, extent);
+}
+
+static int
+map_pmem(struct aspace *aspace,
+ paddr_t pmem, vaddr_t start, size_t extent,
+ bool umem_only)
+{
+ int status;
+ struct region *rgn;
+
+ if (!aspace)
+ return -EINVAL;
+
+ if (umem_only && !pmem_is_umem(pmem, extent)) {
+ printk(KERN_WARNING
+ "User-space tried to map non-UMEM "
+ "(pmem=0x%lx, extent=0x%lx).\n",
+ pmem, extent);
+ return -EPERM;
+ }
+
+ while (extent) {
+ /* Find region covering the address */
+ rgn = find_region(aspace, start);
+ if (!rgn) {
+ printk(KERN_WARNING
+ "Failed to find region covering addr=0x%lx.\n",
+ start);
+ return -EINVAL;
+ }
+
+ /* Can't map anything to kernel or SMARTMAP regions */
+ if ((rgn->flags & VM_KERNEL) || (rgn->flags & VM_SMARTMAP)) {
+ printk(KERN_WARNING
+ "Trying to map memory to protected region.\n");
+ return -EINVAL;
+ }
+
+ /* addresses must be aligned to region's page size */
+ if ((start & (rgn->pagesz-1)) || (pmem & (rgn->pagesz-1))) {
+ printk(KERN_WARNING
+ "Misalignment "
+ "(start=0x%lx, pmem=0x%lx, pagesz=0x%lx).\n",
+ start, pmem, rgn->pagesz);
+ return -EINVAL;
+ }
+
+ /* Map until full extent mapped or end of region is reached */
+ while (extent && (start < rgn->end)) {
+
+ status =
+ arch_aspace_map_page(
+ aspace,
+ start,
+ pmem,
+ rgn->flags,
+ rgn->pagesz
+ );
+ if (status)
+ return status;
+
+ extent -= rgn->pagesz;
+ start += rgn->pagesz;
+ pmem += rgn->pagesz;
+ }
+ }
+
+ return 0;
+}
+
+static int
+map_pmem_locked(id_t id,
+ paddr_t pmem, vaddr_t start, size_t extent,
+ bool umem_only)
+{
+ int status;
+ struct aspace *aspace;
+ unsigned long irqstate;
+
+ local_irq_save(irqstate);
+ aspace = lookup_and_lock(id);
+ status = map_pmem(aspace, pmem, start, extent, umem_only);
+ if (aspace) spin_unlock(&aspace->lock);
+ local_irq_restore(irqstate);
+ return status;
+}
+
+int
+__aspace_map_pmem(struct aspace *aspace,
+ paddr_t pmem, vaddr_t start, size_t extent)
+{
+ return map_pmem(aspace, pmem, start, extent, false);
+}
+
+int
+aspace_map_pmem(id_t id, paddr_t pmem, vaddr_t start, size_t extent)
+{
+ return map_pmem_locked(id, pmem, start, extent, false);
+}
+
+int
+sys_aspace_map_pmem(id_t id, paddr_t pmem, vaddr_t start, size_t extent)
+{
+ if (current->uid != 0)
+ return -EPERM;
+ if (!id_ok(id))
+ return -EINVAL;
+ return map_pmem_locked(id, pmem, start, extent, true);
+}
+
+int
+__aspace_unmap_pmem(struct aspace *aspace, vaddr_t start, size_t extent)
+{
+ struct region *rgn;
+
+ if (!aspace)
+ return -EINVAL;
+
+ while (extent) {
+ /* Find region covering the address */
+ rgn = find_region(aspace, start);
+ if (!rgn) {
+ printk(KERN_WARNING
+ "Failed to find region covering addr=0x%lx.\n",
+ start);
+ return -EINVAL;
+ }
+
+ /* Can't unmap anything from kernel or SMARTMAP regions */
+ if ((rgn->flags & VM_KERNEL) || (rgn->flags & VM_SMARTMAP)) {
+ printk(KERN_WARNING
+ "Trying to map memory to protected region.\n");
+ return -EINVAL;
+ }
+
+ /* address must be aligned to region's page size */
+ if (start & (rgn->pagesz-1)) {
+ printk(KERN_WARNING
+ "Misalignment (start=0x%lx, pagesz=0x%lx).\n",
+ start, rgn->pagesz);
+ return -EINVAL;
+ }
+
+ /* Unmap until full extent unmapped or end of region is reached */
+ while (extent && (start < rgn->end)) {
+
+ arch_aspace_unmap_page(
+ aspace,
+ start,
+ rgn->pagesz
+ );
+
+ extent -= rgn->pagesz;
+ start += rgn->pagesz;
+ }
+ }
+
+ return 0;
+}
+
+int
+aspace_unmap_pmem(id_t id, vaddr_t start, size_t extent)
+{
+ int status;
+ struct aspace *aspace;
+ unsigned long irqstate;
+
+ local_irq_save(irqstate);
+ aspace = lookup_and_lock(id);
+ status = __aspace_unmap_pmem(aspace, start, extent);
+ if (aspace) spin_unlock(&aspace->lock);
+ local_irq_restore(irqstate);
+ return status;
+}
+
+int
+sys_aspace_unmap_pmem(id_t id, vaddr_t start, size_t extent)
+{
+ if (current->uid != 0)
+ return -EPERM;
+ if (!id_ok(id))
+ return -EINVAL;
+ return aspace_unmap_pmem(id, start, extent);
+}
+
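+/**
+ * Creates a VM_SMARTMAP region [start, start+extent) in @dst that provides a
+ * direct view of the @src address space (the actual page-table setup is done
+ * by arch_aspace_smartmap()) and takes a reference on @src so it cannot be
+ * destroyed while the mapping exists.
+ */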
+int
+__aspace_smartmap(struct aspace *src, struct aspace *dst,
+ vaddr_t start, size_t extent)
+{
+ int status;
+ vaddr_t end = start + extent;
+ char name[16];
+ struct region *rgn;
+
+ /* A given source aspace may only be SMARTMAP'ed into a destination once */
+ if (find_smartmap_region(dst, src->id))
+ return -EINVAL;
+
+ if (start >= end)
+ return -EINVAL;
+
+ if ((start & (SMARTMAP_ALIGN-1)) || (end & (SMARTMAP_ALIGN-1)))
+ return -EINVAL;
+
+ snprintf(name, sizeof(name), "SMARTMAP-%u", (unsigned int)src->id);
+ if ((status = __aspace_add_region(dst, start, extent,
+ VM_SMARTMAP, PAGE_SIZE, name)))
+ return status;
+
+ /* Do architecture-specific SMARTMAP initialization */
+ if ((status = arch_aspace_smartmap(src, dst, start, extent))) {
+ BUG_ON(__aspace_del_region(dst, start, extent));
+ return status;
+ }
+
+ /* Remember the source aspace that the SMARTMAP region is mapped to */
+ rgn = find_region(dst, start);
+ BUG_ON(!rgn);
+ rgn->smartmap = src->id;
+
+ /* Ensure source aspace doesn't go away while we have it SMARTMAP'ed */
+ ++src->refcnt;
+
+ return 0;
+}
+
+int
+aspace_smartmap(id_t src, id_t dst, vaddr_t start, size_t extent)
+{
+ int status;
+ struct aspace *src_spc, *dst_spc;
+ unsigned long irqstate;
+
+ /* Don't allow self SMARTMAP'ing */
+ if (src == dst)
+ return -EINVAL;
+
+ local_irq_save(irqstate);
+ if ((status = lookup_and_lock_two(src, dst, &src_spc, &dst_spc))) {
+ local_irq_restore(irqstate);
+ return status;
+ }
+ status = __aspace_smartmap(src_spc, dst_spc, start, extent);
+ spin_unlock(&src_spc->lock);
+ spin_unlock(&dst_spc->lock);
+ local_irq_restore(irqstate);
+ return status;
+}
+
+int
+sys_aspace_smartmap(id_t src, id_t dst, vaddr_t start, size_t extent)
+{
+ if (current->uid != 0)
+ return -EPERM;
+ if (!id_ok(src) || !id_ok(dst))
+ return -EINVAL;
+ return aspace_smartmap(src, dst, start, extent);
+}
+
+int
+__aspace_unsmartmap(struct aspace *src, struct aspace *dst)
+{
+ struct region *rgn;
+ size_t extent;
+
+ if ((rgn = find_smartmap_region(dst, src->id)) == NULL)
+ return -EINVAL;
+ extent = rgn->end - rgn->start;
+
+ /* Do architecture-specific SMARTMAP unmapping */
+ BUG_ON(arch_aspace_unsmartmap(src, dst, rgn->start, extent));
+
+ /* Delete the SMARTMAP region and release our reference on the source */
+ BUG_ON(__aspace_del_region(dst, rgn->start, extent));
+ --src->refcnt;
+
+ return 0;
+}
+
+int
+aspace_unsmartmap(id_t src, id_t dst)
+{
+ int status;
+ struct aspace *src_spc, *dst_spc;
+ unsigned long irqstate;
+
+ /* Don't allow self SMARTMAP'ing */
+ if (src == dst)
+ return -EINVAL;
+
+ local_irq_save(irqstate);
+ if ((status = lookup_and_lock_two(src, dst, &src_spc, &dst_spc))) {
+ local_irq_restore(irqstate);
+ return status;
+ }
+ status = __aspace_unsmartmap(src_spc, dst_spc);
+ spin_unlock(&src_spc->lock);
+ spin_unlock(&dst_spc->lock);
+ local_irq_restore(irqstate);
+ return status;
+}
+
+int
+sys_aspace_unsmartmap(id_t src, id_t dst)
+{
+ if (current->uid != 0)
+ return -EPERM;
+ if (!id_ok(src) || !id_ok(dst))
+ return -EINVAL;
+ return aspace_unsmartmap(src, dst);
+}
+
+int
+aspace_dump2console(id_t id)
+{
+ struct aspace *aspace;
+ struct region *rgn;
+ unsigned long irqstate;
+
+ local_irq_save(irqstate);
+
+ if ((aspace = lookup_and_lock(id)) == NULL) {
+ local_irq_restore(irqstate);
+ return -EINVAL;
+ }
+
+ printk(KERN_DEBUG "DUMP OF ADDRESS SPACE %u:\n", aspace->id);
+ printk(KERN_DEBUG " name: %s\n", aspace->name);
+ printk(KERN_DEBUG " refcnt: %d\n", aspace->refcnt);
+ printk(KERN_DEBUG " regions:\n");
+ list_for_each_entry(rgn, &aspace->region_list, link) {
+ printk(KERN_DEBUG
+ " [0x%016lx, 0x%016lx%c %s\n",
+ rgn->start,
+ rgn->end,
+ (rgn->end == ULONG_MAX) ? ']' : ')',
+ rgn->name
+ );
+ }
+
+ spin_unlock(&aspace->lock);
+ local_irq_restore(irqstate);
+ return 0;
+}
+
+int
+sys_aspace_dump2console(id_t id)
+{
+ return aspace_dump2console(id);
+}
--- /dev/null
+../user/liblwk/aspace.c
\ No newline at end of file
--- /dev/null
+/*
+ * lwk/mm/bootmem.c
+ *
+ * Copyright (C) 1999 Ingo Molnar
+ * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
+ *
+ * simple boot-time physical memory area allocator and
+ * free memory collector. It's used to deal with reserved
+ * system memory and memory holes as well.
+ */
+
+#include <lwk/init.h>
+#include <lwk/pfn.h>
+#include <lwk/bootmem.h>
+#include <lwk/params.h>
+#include <lwk/log2.h>
+#include <lwk/pmem.h>
+#include <lwk/kmem.h>
+#include <lwk/bitops.h>
+#include <arch/io.h>
+
+/**
+ * Set to true once the bootmem allocator has been destroyed.
+ */
+static bool bootmem_destroyed = false;
+
+/**
+ * Access to this subsystem has to be serialized externally.
+ * (this is true for the boot process anyway)
+ */
+
+
+/**
+ * Amount of system memory to reserve for use by the kernel. The first
+ * kmem_size bytes of system memory [0, kmem_size) will be added to the
+ * kernel memory pool. The remainder of system memory is left untouched by
+ * the kernel and is available for use by applications.
+ */
+static unsigned long kmem_size = (1024 * 1024 * 8); /* default is first 8 MB */
+param(kmem_size, ulong);
+
+
+/**
+ * The bootmem_data describing boot memory; initialized by init_bootmem().
+ */
+static bootmem_data_t __initdata bootmem_data;
+
+/**
+ * List of bootmem_data structures, each describing a section of
+ * physical memory.
+ */
+static LIST_HEAD(bdata_list);
+
+/**
+ * Returns the number of _pages_ that will be allocated for the boot bitmap.
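+ * For example, tracking 32768 pages needs a 4096-byte bitmap, which with
+ * 4 KB pages rounds up to a single page.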
+ */
+unsigned long __init
+bootmem_bootmap_pages(unsigned long pages)
+{
+ unsigned long mapsize;
+
+ mapsize = (pages+7)/8;
+ mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
+ mapsize >>= PAGE_SHIFT;
+
+ return mapsize;
+}
+
+/**
+ * Links a newly created bootmem_data structure to the bdata_list.
+ */
+static void __init
+link_bootmem(bootmem_data_t *bdata)
+{
+ bootmem_data_t *ent;
+ if (list_empty(&bdata_list)) {
+ list_add(&bdata->list, &bdata_list);
+ return;
+ }
+ /* insert in order */
+ list_for_each_entry(ent, &bdata_list, list) {
+ if (bdata->node_boot_start < ent->node_boot_start) {
+ list_add_tail(&bdata->list, &ent->list);
+ return;
+ }
+ }
+ list_add_tail(&bdata->list, &bdata_list);
+ return;
+}
+
+/**
+ * Called once to set up the allocator itself.
+ */
+static unsigned long __init
+init_bootmem_core(
+ bootmem_data_t *bdata,
+ unsigned long mapstart,
+ unsigned long start,
+ unsigned long end
+)
+{
+ unsigned long mapsize = ((end - start)+7)/8;
+
+ mapsize = ALIGN(mapsize, sizeof(long));
+ bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT);
+ bdata->node_boot_start = (start << PAGE_SHIFT);
+ bdata->node_low_pfn = end;
+ link_bootmem(bdata);
+
+ /*
+ * Initially all pages are reserved - setup_arch() has to
+ * register free RAM areas explicitly.
+ */
+ memset(bdata->node_bootmem_map, 0xff, mapsize);
+
+ return mapsize;
+}
+
+/**
+ * Marks a particular physical memory range as unallocatable. Usable RAM
+ * might be used for boot-time allocations - or it might get added
+ * to the free page pool later on.
+ */
+static void __init
+reserve_bootmem_core(
+ bootmem_data_t *bdata,
+ unsigned long addr,
+ unsigned long size
+)
+{
+ unsigned long sidx, eidx;
+ unsigned long i;
+
+ /*
+ * round up, partially reserved pages are considered
+ * fully reserved.
+ */
+ BUG_ON(!size);
+ BUG_ON(PFN_DOWN(addr) >= bdata->node_low_pfn);
+ BUG_ON(PFN_UP(addr + size) > bdata->node_low_pfn);
+
+ sidx = PFN_DOWN(addr - bdata->node_boot_start);
+ eidx = PFN_UP(addr + size - bdata->node_boot_start);
+
+ for (i = sidx; i < eidx; i++) {
+ if (test_and_set_bit(i, bdata->node_bootmem_map)) {
+#ifdef CONFIG_DEBUG_BOOTMEM
+ printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
+#endif
+ }
+ }
+}
+
+/**
+ * Frees a section of bootmemory.
+ */
+static void __init
+free_bootmem_core(
+ bootmem_data_t *bdata,
+ unsigned long addr,
+ unsigned long size
+)
+{
+ unsigned long i;
+ unsigned long start;
+ /*
+ * round down end of usable mem, partially free pages are
+ * considered reserved.
+ */
+ unsigned long sidx;
+ unsigned long eidx = (addr + size - bdata->node_boot_start)/PAGE_SIZE;
+ unsigned long end = (addr + size)/PAGE_SIZE;
+
+ BUG_ON(!size);
+ BUG_ON(end > bdata->node_low_pfn);
+
+ if (addr < bdata->last_success)
+ bdata->last_success = addr;
+
+ /*
+ * Round up the beginning of the address.
+ */
+ start = (addr + PAGE_SIZE-1) / PAGE_SIZE;
+ sidx = start - (bdata->node_boot_start/PAGE_SIZE);
+
+ for (i = sidx; i < eidx; i++) {
+ if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
+ BUG();
+ }
+}
+
+/**
+ * We 'merge' subsequent allocations to save space. We might 'lose'
+ * some fraction of a page if allocations cannot be satisfied due to
+ * size constraints on boxes where there is physical RAM space
+ * fragmentation - in these cases (mostly large memory boxes) this
+ * is not a problem.
+ *
+ * On low memory boxes we get it right in 100% of the cases.
+ *
+ * alignment has to be a power of 2 value.
+ *
+ * NOTE: This function is _not_ reentrant.
+ */
+void * __init
+__alloc_bootmem_core(
+ struct bootmem_data *bdata,
+ unsigned long size,
+ unsigned long align,
+ unsigned long goal,
+ unsigned long limit
+)
+{
+ unsigned long offset, remaining_size, areasize, preferred;
+ unsigned long i, start = 0, incr, eidx, end_pfn = bdata->node_low_pfn;
+ void *ret;
+
+ if (bootmem_destroyed)
+ panic("The bootmem allocator has been destroyed.");
+
+ if(!size) {
+ printk("__alloc_bootmem_core(): zero-sized request\n");
+ BUG();
+ }
+ BUG_ON(align & (align-1));
+
+ if (limit && bdata->node_boot_start >= limit)
+ return NULL;
+
+ limit >>=PAGE_SHIFT;
+ if (limit && end_pfn > limit)
+ end_pfn = limit;
+
+ eidx = end_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
+ offset = 0;
+ if (align &&
+ (bdata->node_boot_start & (align - 1UL)) != 0)
+ offset = (align - (bdata->node_boot_start & (align - 1UL)));
+ offset >>= PAGE_SHIFT;
+
+ /*
+ * We try to allocate bootmem pages above 'goal'
+ * first, then we try to allocate lower pages.
+ */
+ if (goal && (goal >= bdata->node_boot_start) &&
+ ((goal >> PAGE_SHIFT) < end_pfn)) {
+ preferred = goal - bdata->node_boot_start;
+
+ if (bdata->last_success >= preferred)
+ if (!limit || (limit && limit > bdata->last_success))
+ preferred = bdata->last_success;
+ } else
+ preferred = 0;
+
+ preferred = ALIGN(preferred, align) >> PAGE_SHIFT;
+ preferred += offset;
+ areasize = (size+PAGE_SIZE-1)/PAGE_SIZE;
+ incr = align >> PAGE_SHIFT ? : 1;
+
+restart_scan:
+ for (i = preferred; i < eidx; i += incr) {
+ unsigned long j;
+ i = find_next_zero_bit(bdata->node_bootmem_map, eidx, i);
+ i = ALIGN(i, incr);
+ if (i >= eidx)
+ break;
+ if (test_bit(i, bdata->node_bootmem_map))
+ continue;
+ for (j = i + 1; j < i + areasize; ++j) {
+ if (j >= eidx)
+ goto fail_block;
+ if (test_bit (j, bdata->node_bootmem_map))
+ goto fail_block;
+ }
+ start = i;
+ goto found;
+ fail_block:
+ i = ALIGN(j, incr);
+ }
+
+ if (preferred > offset) {
+ preferred = offset;
+ goto restart_scan;
+ }
+ return NULL;
+
+found:
+ bdata->last_success = start << PAGE_SHIFT;
+ BUG_ON(start >= eidx);
+
+ /*
+ * Is the next page of the previous allocation-end the start
+ * of this allocation's buffer? If yes then we can 'merge'
+ * the previous partial page with this allocation.
+ */
+ if (align < PAGE_SIZE &&
+ bdata->last_offset && bdata->last_pos+1 == start) {
+ offset = ALIGN(bdata->last_offset, align);
+ BUG_ON(offset > PAGE_SIZE);
+ remaining_size = PAGE_SIZE-offset;
+ if (size < remaining_size) {
+ areasize = 0;
+ /* last_pos unchanged */
+ bdata->last_offset = offset+size;
+ ret = phys_to_virt(bdata->last_pos*PAGE_SIZE + offset +
+ bdata->node_boot_start);
+ } else {
+ remaining_size = size - remaining_size;
+ areasize = (remaining_size+PAGE_SIZE-1)/PAGE_SIZE;
+ ret = phys_to_virt(bdata->last_pos*PAGE_SIZE + offset +
+ bdata->node_boot_start);
+ bdata->last_pos = start+areasize-1;
+ bdata->last_offset = remaining_size;
+ }
+ bdata->last_offset &= ~PAGE_MASK;
+ } else {
+ bdata->last_pos = start + areasize - 1;
+ bdata->last_offset = size & ~PAGE_MASK;
+ ret = phys_to_virt(start * PAGE_SIZE + bdata->node_boot_start);
+ }
+
+ /*
+ * Reserve the area now:
+ */
+ for (i = start; i < start+areasize; i++)
+ if (unlikely(test_and_set_bit(i, bdata->node_bootmem_map)))
+ BUG();
+ memset(ret, 0, size);
+ return ret;
+}
+
+static void __init
+free_all_bootmem_core(struct bootmem_data *bdata)
+{
+ unsigned long pfn;
+ unsigned long vaddr;
+ unsigned long i, m, count;
+ unsigned long bootmem_total=0, kmem_total=0, umem_total=0;
+ unsigned long kmem_max_idx, max_idx;
+ unsigned long *map;
+ struct pmem_region rgn;
+
+ BUG_ON(!bdata->node_bootmem_map);
+
+ kmem_max_idx = (kmem_size >> PAGE_SHIFT) - (bdata->node_boot_start >> PAGE_SHIFT);
+ max_idx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
+ BUG_ON(kmem_max_idx > max_idx);
+
+ /* Create the initial kernel managed memory pool (kmem) */
+ count = 0;
+ pfn = bdata->node_boot_start >> PAGE_SHIFT; /* first extant page of node */
+ map = bdata->node_bootmem_map;
+ for (i = 0; i < kmem_max_idx; ) {
+ unsigned long v = ~map[i / BITS_PER_LONG];
+
+ if (v) {
+ vaddr = (unsigned long) __va(pfn << PAGE_SHIFT);
+ for (m = 1; m && i < kmem_max_idx; m<<=1, vaddr+=PAGE_SIZE, i++) {
+ if (v & m) {
+ count++;
+ kmem_add_memory(vaddr, PAGE_SIZE);
+ }
+ }
+ } else {
+ i+=BITS_PER_LONG;
+ }
+ pfn += BITS_PER_LONG;
+ }
+ BUG_ON(count == 0);
+
+ /*
+ * At this point, kmem_alloc() will work. The physical memory tracking
+ * code relies on kmem_alloc(), so it cannot be initialized until now.
+ *
+ * Tell the physical memory tracking subsystem about the kernel-managed
+ * pool and the remaining memory that will be managed by user-space.
+ */
+ pfn = bdata->node_boot_start >> PAGE_SHIFT; /* first extant page of node */
+ map = bdata->node_bootmem_map;
+ pmem_region_unset_all(&rgn);
+ rgn.type_is_set = true;
+ rgn.allocated_is_set = true;
+ rgn.lgroup_is_set = true;
+ for (i = 0; i < max_idx; ) {
+ unsigned long v = ~map[i / BITS_PER_LONG];
+ unsigned long paddr = (unsigned long) pfn << PAGE_SHIFT;
+
+ for (m = 1; m && i < max_idx; m<<=1, paddr+=PAGE_SIZE, i++) {
+ rgn.start = paddr;
+ rgn.end = paddr + PAGE_SIZE;
+
+ if (v & m) {
+ if (i < kmem_max_idx) {
+ rgn.type = PMEM_TYPE_KMEM;
+ rgn.allocated = true;
+ rgn.lgroup = 0;
+ ++kmem_total;
+ } else {
+ rgn.type = PMEM_TYPE_UMEM;
+ rgn.allocated = false;
+ rgn.lgroup = 0;
+ ++umem_total;
+ }
+ } else {
+ rgn.type = PMEM_TYPE_BOOTMEM;
+ rgn.allocated = true;
+ rgn.lgroup = 0;
+ ++bootmem_total;
+ }
+
+ if (pmem_add(&rgn))
+ BUG();
+ }
+
+ pfn += BITS_PER_LONG;
+ }
+
+ /*
+ * Now free the allocator bitmap itself, it's not
+ * needed anymore:
+ */
+ vaddr = (unsigned long)bdata->node_bootmem_map;
+ count = 0;
+ pmem_region_unset_all(&rgn);
+ rgn.type_is_set = true;
+ rgn.allocated_is_set = true;
+ rgn.lgroup_is_set = true;
+ for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,vaddr+=PAGE_SIZE) {
+ count++;
+
+ rgn.start = __pa(vaddr);
+ rgn.end = rgn.start + PAGE_SIZE;
+
+ if (i < kmem_max_idx) {
+ kmem_add_memory(vaddr, PAGE_SIZE);
+ rgn.type = PMEM_TYPE_KMEM;
+ rgn.allocated = true;
+ rgn.lgroup = 0;
+ } else {
+ rgn.type = PMEM_TYPE_UMEM;
+ rgn.allocated = false;
+ rgn.lgroup = 0;
+ }
+
+ pmem_add(&rgn);
+ }
+ BUG_ON(count == 0);
+
+ /* Mark the bootmem allocator as dead */
+ bdata->node_bootmem_map = NULL;
+
+ printk(KERN_DEBUG
+ "The boot-strap bootmem allocator has been destroyed:\n");
+ printk(KERN_DEBUG
+ " %lu bytes released to the kernel-managed memory pool (kmem)\n",
+ kmem_total << PAGE_SHIFT);
+ printk(KERN_DEBUG
+ " %lu bytes released to the user-managed memory pool (umem)\n",
+ umem_total << PAGE_SHIFT);
+}
+
+/**
+ * Initialize boot memory allocator.
+ */
+unsigned long __init
+init_bootmem(unsigned long start, unsigned long pages)
+{
+ return init_bootmem_core(&bootmem_data, start, 0, pages);
+}
+
+/**
+ * Reserve a portion of the boot memory.
+ * This prevents the reserved memory from being allocated.
+ */
+void __init
+reserve_bootmem(unsigned long addr, unsigned long size)
+{
+ reserve_bootmem_core(&bootmem_data, addr, size);
+}
+
+/**
+ * Return a portion of boot memory to the free pool.
+ * Note that the region freed is the set of pages covering
+ * the byte range [addr, addr+size).
+ */
+void __init
+free_bootmem(unsigned long addr, unsigned long size)
+{
+ free_bootmem_core(&bootmem_data, addr, size);
+}
+
+void __init
+free_all_bootmem(void)
+{
+ free_all_bootmem_core(&bootmem_data);
+ bootmem_destroyed = true;
+}
+
+static void * __init
+__alloc_bootmem_nopanic(unsigned long size, unsigned long align, unsigned long goal)
+{
+ bootmem_data_t *bdata;
+ void *ptr;
+
+ list_for_each_entry(bdata, &bdata_list, list)
+ if ((ptr = __alloc_bootmem_core(bdata, size, align, goal, 0)))
+ return(ptr);
+ return NULL;
+}
+
+/**
+ * Allocate a chunk of memory from the boot memory allocator.
+ *
+ * size = number of bytes requested
+ * align = required alignment
+ * goal = hint specifying address to start search.
+ */
+void * __init
+__alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal)
+{
+ void *mem = __alloc_bootmem_nopanic(size,align,goal);
+ if (mem)
+ return mem;
+ /*
+ * Whoops, we cannot satisfy the allocation request.
+ */
+ printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
+ panic("Out of memory");
+ return NULL;
+}
+
+/**
+ * Allocates a block of memory of the specified size.
+ */
+void * __init
+alloc_bootmem(unsigned long size)
+{
+ return __alloc_bootmem(size, SMP_CACHE_BYTES, 0);
+}
+
+/**
+ * Allocates a block of memory of the specified size and alignment.
+ */
+void * __init
+alloc_bootmem_aligned(unsigned long size, unsigned long align)
+{
+ return __alloc_bootmem(size, align, 0);
+}
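+
+/*
+ * Editor's note (illustrative sketch, not part of the original file):
+ * early-boot code uses these wrappers before the kmem pool exists. For
+ * example, the buddy allocator introduced later in this changeset obtains
+ * its bookkeeping structures this way:
+ *
+ *     mp = alloc_bootmem(sizeof(struct buddy_mempool));
+ *
+ * A hypothetical page-aligned scratch buffer could likewise be obtained with:
+ *
+ *     buf = alloc_bootmem_aligned(PAGE_SIZE, PAGE_SIZE);
+ */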
+
+/**
+ * Initializes the kernel memory subsystem.
+ */
+void __init
+mem_subsys_init(void)
+{
+ /* We like powers of two */
+ if (!is_power_of_2(kmem_size)) {
+ printk(KERN_WARNING "kmem_size must be a power of two; rounding up.\n");
+ kmem_size = roundup_pow_of_two(kmem_size);
+ }
+
+ printk(KERN_DEBUG
+ "First %lu bytes of system memory reserved for the kernel.\n",
+ kmem_size);
+
+ /* Initialize the kernel memory pool */
+ kmem_create_zone(PAGE_OFFSET, kmem_size);
+ free_all_bootmem();
+ arch_memsys_init(kmem_size);
+}
+
--- /dev/null
+/* Copyright (c) 2007, Sandia National Laboratories */
+
+#include <lwk/kernel.h>
+#include <lwk/log2.h>
+#include <lwk/buddy.h>
+#include <lwk/bootmem.h>
+
+
+/**
+ * Each free block has one of these structures at its head. The link member
+ * provides linkage for the mp->avail[order] free list, where order is the
+ * size of the free block.
+ */
+struct block {
+ struct list_head link;
+ unsigned long order;
+};
+
+
+/**
+ * Converts a block address to its block index in the specified buddy allocator.
+ * A block's index is used to find the block's tag bit, mp->tag_bits[block_id].
+ */
+static unsigned long
+block_to_id(struct buddy_mempool *mp, struct block *block)
+{
+ unsigned long block_id =
+ ((unsigned long)block - mp->base_addr) >> mp->min_order;
+ BUG_ON(block_id >= mp->num_blocks);
+ return block_id;
+}
+
+
+/**
+ * Marks a block as free by setting its tag bit to one.
+ */
+static void
+mark_available(struct buddy_mempool *mp, struct block *block)
+{
+ __set_bit(block_to_id(mp, block), mp->tag_bits);
+}
+
+
+/**
+ * Marks a block as allocated by setting its tag bit to zero.
+ */
+static void
+mark_allocated(struct buddy_mempool *mp, struct block *block)
+{
+ __clear_bit(block_to_id(mp, block), mp->tag_bits);
+}
+
+
+/**
+ * Returns true if block is free, false if it is allocated.
+ */
+static int
+is_available(struct buddy_mempool *mp, struct block *block)
+{
+ return test_bit(block_to_id(mp, block), mp->tag_bits);
+}
+
+
+/**
+ * Returns the address of the block's buddy block.
+ */
+static void *
+find_buddy(struct buddy_mempool *mp, struct block *block, unsigned long order)
+{
+ unsigned long _block;
+ unsigned long _buddy;
+
+ BUG_ON((unsigned long)block < mp->base_addr);
+
+ /* Fixup block address to be zero-relative */
+ _block = (unsigned long)block - mp->base_addr;
+
+ /* Calculate buddy in zero-relative space */
+ _buddy = _block ^ (1UL << order);
+
+ /* Return the buddy's address */
+ return (void *)(_buddy + mp->base_addr);
+}
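+
+/*
+ * Editor's note (illustrative, not part of the original file): the XOR in
+ * find_buddy() works because two buddies of size 2^order differ only in
+ * bit 'order' of their pool-relative offset. For example, with order = 12
+ * (4 KB blocks) and a zero-relative pool:
+ *
+ *     _block = 0x0000  ->  _buddy = 0x0000 ^ 0x1000 = 0x1000
+ *     _block = 0x1000  ->  _buddy = 0x1000 ^ 0x1000 = 0x0000
+ *     _block = 0x3000  ->  _buddy = 0x3000 ^ 0x1000 = 0x2000
+ */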
+
+
+/**
+ * Initializes a buddy system memory allocator object.
+ *
+ * Arguments:
+ * [IN] base_addr: Base address of the memory pool.
+ * [IN] pool_order: Size of the memory pool (2^pool_order bytes).
+ * [IN] min_order: Minimum allocatable block size (2^min_order bytes).
+ *
+ * Returns:
+ * Success: Pointer to an initialized buddy system memory allocator.
+ * Failure: NULL
+ *
+ * NOTE: The min_order argument is provided as an optimization. Since one tag
+ * bit is required for each minimum-sized block, large memory pools that
+ * allow order 0 allocations will use large amounts of memory. Specifying
+ * a min_order of 5 (32 bytes), for example, reduces the number of tag
+ * bits by 32x.
+ */
+struct buddy_mempool *
+buddy_init(
+ unsigned long base_addr,
+ unsigned long pool_order,
+ unsigned long min_order
+)
+{
+ struct buddy_mempool *mp;
+ unsigned long i;
+
+ /* Smallest block size must be big enough to hold a block structure */
+ if ((1UL << min_order) < sizeof(struct block))
+ min_order = ilog2( roundup_pow_of_two(sizeof(struct block)) );
+
+ /* The minimum block order must be smaller than the pool order */
+ if (min_order > pool_order)
+ return NULL;
+
+ mp = alloc_bootmem(sizeof(struct buddy_mempool));
+
+ mp->base_addr = base_addr;
+ mp->pool_order = pool_order;
+ mp->min_order = min_order;
+
+ /* Allocate a list for every order up to the maximum allowed order */
+ mp->avail = alloc_bootmem((pool_order + 1) * sizeof(struct list_head));
+
+ /* Initially all lists are empty */
+ for (i = 0; i <= pool_order; i++)
+ INIT_LIST_HEAD(&mp->avail[i]);
+
+ /* Allocate a bitmap with 1 bit per minimum-sized block */
+ mp->num_blocks = (1UL << pool_order) / (1UL << min_order);
+ mp->tag_bits = alloc_bootmem(
+ BITS_TO_LONGS(mp->num_blocks) * sizeof(long)
+ );
+
+ /* Initially mark all minimum-sized blocks as allocated */
+ bitmap_zero(mp->tag_bits, mp->num_blocks);
+
+ return mp;
+}
+
+
+/**
+ * Allocates a block of memory of the requested size (2^order bytes).
+ *
+ * Arguments:
+ * [IN] mp: Buddy system memory allocator object.
+ * [IN] order: Block size to allocate (2^order bytes).
+ *
+ * Returns:
+ * Success: Pointer to the start of the allocated memory block.
+ * Failure: NULL
+ */
+void *
+buddy_alloc(struct buddy_mempool *mp, unsigned long order)
+{
+ unsigned long j;
+ struct list_head *list;
+ struct block *block;
+ struct block *buddy_block;
+
+ BUG_ON(mp == NULL);
+ BUG_ON(order > mp->pool_order);
+
+ /* Fixup requested order to be at least the minimum supported */
+ if (order < mp->min_order)
+ order = mp->min_order;
+
+ for (j = order; j <= mp->pool_order; j++) {
+
+ /* Try to allocate the first block in the order j list */
+ list = &mp->avail[j];
+ if (list_empty(list))
+ continue;
+ block = list_entry(list->next, struct block, link);
+ list_del(&block->link);
+ mark_allocated(mp, block);
+
+ /* Trim if a higher order block than necessary was allocated */
+ while (j > order) {
+ --j;
+ buddy_block = (struct block *)((unsigned long)block + (1UL << j));
+ buddy_block->order = j;
+ mark_available(mp, buddy_block);
+ list_add(&buddy_block->link, &mp->avail[j]);
+ }
+
+ return block;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * Returns a block of memory to the buddy system memory allocator.
+ *
+ * Arguments:
+ * [IN] mp: Buddy system memory allocator object.
+ * [IN] addr: Address of memory block to free.
+ * [IN] order: Size of the memory block (2^order bytes).
+ */
+void
+buddy_free(struct buddy_mempool *mp, void *addr, unsigned long order)
+{
+ struct block *block;
+ struct block *buddy;
+
+ BUG_ON(mp == NULL);
+ BUG_ON(order > mp->pool_order);
+
+ /* Fixup requested order to be at least the minimum supported */
+ if (order < mp->min_order)
+ order = mp->min_order;
+
+ /* Overlay block structure on the memory block being freed */
+ block = addr;
+ BUG_ON(is_available(mp, block));
+
+ /* Coalesce as much as possible with adjacent free buddy blocks */
+ while (order < mp->pool_order) {
+ /* Determine our buddy block's address */
+ buddy = find_buddy(mp, block, order);
+
+ /* Make sure buddy is available and has the same size as us */
+ if (!is_available(mp, buddy))
+ break;
+ if (buddy->order != order)
+ break;
+
+ /* OK, we're good to go... buddy merge! */
+ list_del(&buddy->link);
+ if (buddy < block)
+ block = buddy;
+ ++order;
+ block->order = order;
+ }
+
+ /* Add the (possibly coalesced) block to the appropriate free list */
+ block->order = order;
+ mark_available(mp, block);
+ list_add(&block->link, &mp->avail[order]);
+}
+
+
+/**
+ * Dumps the state of a buddy system memory allocator object to the console.
+ */
+void
+buddy_dump_mempool(struct buddy_mempool *mp)
+{
+ unsigned long i;
+ unsigned long num_blocks;
+ struct list_head *entry;
+
+ printk(KERN_DEBUG "DUMP OF BUDDY MEMORY POOL:\n");
+
+ for (i = mp->min_order; i <= mp->pool_order; i++) {
+
+ /* Count the number of memory blocks in the list */
+ num_blocks = 0;
+ list_for_each(entry, &mp->avail[i])
+ ++num_blocks;
+
+ printk(KERN_DEBUG " order %2lu: %lu free blocks\n", i, num_blocks);
+ }
+}
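+
+/*
+ * Editor's sketch (not part of the original file): typical use of the buddy
+ * allocator interface above. The numbers are hypothetical; a 128 MB pool
+ * (pool_order = 27) with min_order = 5 needs 2^22 tag bits (512 KB) instead
+ * of the 16 MB that order-0 granularity would require.
+ *
+ *     struct buddy_mempool *mp;
+ *     void *buf;
+ *
+ *     mp = buddy_init(PAGE_OFFSET, 27, 5);       // empty 128 MB pool
+ *     buddy_free(mp, (void *)PAGE_OFFSET, 27);   // donate the backing memory
+ *     buf = buddy_alloc(mp, 12);                 // grab one 4 KB block
+ *     buddy_free(mp, buf, 12);                   // return it
+ *     buddy_dump_mempool(mp);
+ */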
+
+
--- /dev/null
+/* Copyright (c) 2007, Sandia National Laboratories */
+
+#include <lwk/kernel.h>
+#include <lwk/buddy.h>
+#include <lwk/log2.h>
+
+
+/**
+ * This specifies the minimum sized memory block to request from the underlying
+ * buddy system memory allocator, 2^MIN_ORDER bytes. It must be at least big
+ * enough to hold a 'struct kmem_block_hdr'.
+ */
+#define MIN_ORDER 5 /* 32 bytes */
+
+
+/**
+ * Magic value used for sanity checking. Every block of memory allocated via
+ * kmem_alloc() has this value in its block header.
+ */
+#define KMEM_MAGIC 0xF0F0F0F0F0F0F0F0UL
+
+
+/**
+ * The kernel memory pool. This manages all memory available for dynamic
+ * allocation by the kernel. The kernel reserves some amount of memory
+ * (e.g., the first 8 MB, amount specifiable on kernel boot command line) for
+ * its own use, included in which is the kernel memory pool. The rest of memory
+ * is reserved for user applications.
+ *
+ * NOTE: There is currently only one buddy_mempool for the entire system.
+ * This may change to one per NUMA node in the future.
+ */
+static struct buddy_mempool *kmem = NULL;
+
+
+/**
+ * Total number of bytes in the kernel memory pool.
+ */
+static unsigned long kmem_bytes_managed;
+
+
+/**
+ * Total number of bytes allocated from the kernel memory pool.
+ */
+static unsigned long kmem_bytes_allocated;
+
+
+/**
+ * Each block of memory allocated from the kernel memory pool has one of these
+ * structures at its head. The structure contains information needed to free
+ * the block and return it to the underlying memory allocator.
+ *
+ * When a block is allocated, the address returned to the caller is
+ * sizeof(struct kmem_block_hdr) bytes greater than the address of the block
+ * obtained from the underlying memory allocator.
+ *
+ * WARNING: This structure is defined to be exactly 16 bytes in size.
+ * Do not change this unless you really know what you are doing.
+ */
+struct kmem_block_hdr {
+ uint64_t order; /* order of the block allocated from buddy system */
+ uint64_t magic; /* magic value used as sanity check */
+} __attribute__((packed));
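+
+/*
+ * Editor's note (illustrative, not part of the original file): layout of a
+ * minimum-sized (order 5, 32 byte) allocation, assuming the underlying buddy
+ * block starts at the hypothetical address 0x1000:
+ *
+ *     0x1000: hdr->order  (8 bytes)
+ *     0x1008: hdr->magic  (8 bytes)
+ *     0x1010: address returned to the caller (hdr + 1), 16 usable bytes
+ */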
+
+
+/**
+ * This adds a zone to the kernel memory pool. Zones exist to allow there to be
+ * multiple non-adjacent regions of physically contiguous memory. The
+ * bookkeeping needed to cover the gaps would waste a lot of memory and have no
+ * benefit.
+ *
+ * Arguments:
+ * [IN] base_addr: Base address of the memory pool.
+ * [IN] size: Size of the memory pool in bytes.
+ *
+ * NOTE: Currently only one zone is supported. Calling kmem_create_zone() more
+ * than once will result in a panic.
+ */
+void
+kmem_create_zone(unsigned long base_addr, size_t size)
+{
+ unsigned long pool_order = ilog2(roundup_pow_of_two(size));
+ unsigned long min_order = MIN_ORDER;
+
+ /* For now, protect against calling kmem_create_zone() more than once */
+ BUG_ON(kmem != NULL);
+
+ /* Initialize the underlying buddy allocator */
+ if ((kmem = buddy_init(base_addr, pool_order, min_order)) == NULL)
+ panic("buddy_init() failed.");
+}
+
+
+/**
+ * This adds memory to the kernel memory pool. The memory region being added
+ * must fall within a zone previously specified via kmem_create_zone().
+ *
+ * Arguments:
+ * [IN] base_addr: Base address of the memory region to add.
+ * [IN] size: Size of the memory region in bytes.
+ */
+void
+kmem_add_memory(unsigned long base_addr, size_t size)
+{
+ /*
+ * kmem buddy allocator is initially empty.
+ * Memory is added to it via buddy_free().
+ * buddy_free() will panic if there are any problems with the args.
+ */
+ buddy_free(kmem, (void *)base_addr, ilog2(size));
+
+ /* Update statistics */
+ kmem_bytes_managed += size;
+}
+
+
+/**
+ * Allocates memory from the kernel memory pool. This will return a memory
+ * region that is at least 16-byte aligned. The memory returned is zeroed.
+ *
+ * Arguments:
+ * [IN] size: Amount of memory to allocate in bytes.
+ *
+ * Returns:
+ * Success: Pointer to the start of the allocated memory.
+ * Failure: NULL
+ */
+void *
+kmem_alloc(size_t size)
+{
+ unsigned long order;
+ struct kmem_block_hdr *hdr;
+
+ /* Make room for block header */
+ size += sizeof(struct kmem_block_hdr);
+
+ /* Calculate the block order needed */
+ order = ilog2(roundup_pow_of_two(size));
+ if (order < MIN_ORDER)
+ order = MIN_ORDER;
+
+ /* Allocate memory from the underlying buddy system */
+ if ((hdr = buddy_alloc(kmem, order)) == NULL)
+ return NULL;
+
+ /* Zero the block */
+ memset(hdr, 0, (1UL << order));
+
+ /* Initialize the block header */
+ hdr->order = order; /* kmem_free() needs this to free the block */
+ hdr->magic = KMEM_MAGIC; /* used for sanity check */
+
+ /* Update statistics */
+ kmem_bytes_allocated += (1UL << order);
+
+ /* Return address of first byte after block header to caller */
+ return hdr + 1;
+}
+
+
+/**
+ * Frees memory previously allocated with kmem_alloc().
+ *
+ * Arguments:
+ * [IN] addr: Address of the memory region to free.
+ *
+ * NOTE: The size of the memory region being freed is assumed to be in a
+ * 'struct kmem_block_hdr' header located immediately before the address
+ * passed in by the caller. This header is created and initialized by
+ * kmem_alloc().
+ */
+void
+kmem_free(void *addr)
+{
+ struct kmem_block_hdr *hdr;
+
+ BUG_ON((unsigned long)addr < sizeof(struct kmem_block_hdr));
+
+ /* Find the block header */
+ hdr = (struct kmem_block_hdr *)addr - 1;
+ BUG_ON(hdr->magic != KMEM_MAGIC);
+
+ /* Return block to the underlying buddy system */
+ buddy_free(kmem, hdr, hdr->order);
+
+ kmem_bytes_allocated -= (1UL << hdr->order);
+}
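+
+/*
+ * Editor's sketch (not part of the original file): typical kmem_alloc() /
+ * kmem_free() usage from kernel code. 'struct foo' is a hypothetical type
+ * used only for illustration.
+ *
+ *     struct foo *f = kmem_alloc(sizeof(*f));
+ *     if (!f)
+ *             return -ENOMEM;
+ *     // ... f is zero-filled and at least 16-byte aligned ...
+ *     kmem_free(f);
+ */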
+
+
+/**
+ * Allocates pages of memory from the kernel memory pool. The number of pages
+ * requested must be a power of two and the returned pages will be contiguous
+ * in physical memory. The memory returned is zeroed.
+ *
+ * Arguments:
+ * [IN] order: Number of pages to allocate, 2^order:
+ * 0 = 1 page
+ * 1 = 2 pages
+ * 2 = 4 pages
+ * 3 = 8 pages
+ * ...
+ * Returns:
+ * Success: Pointer to the start of the allocated memory.
+ * Failure: NULL
+ */
+void *
+kmem_get_pages(unsigned long order)
+{
+ unsigned long block_order;
+ void *addr;
+
+ /* Calculate the block size needed; convert page order to byte order */
+ block_order = order + ilog2(PAGE_SIZE);
+
+ /* Allocate memory from the underlying buddy system */
+ if ((addr = buddy_alloc(kmem, block_order)) == NULL)
+ return NULL;
+
+ /* Zero the block and return its address */
+ memset(addr, 0, (1UL << block_order));
+ return addr;
+}
+
+
+/**
+ * Frees pages of memory previously allocated with kmem_get_pages().
+ *
+ * Arguments:
+ * [IN] addr: Address of the memory region to free.
+ * [IN] order: Number of pages to free, 2^order.
+ * The order must match the value passed to kmem_get_pages()
+ * when the pages were allocated.
+ */
+void
+kmem_free_pages(void *addr, unsigned long order)
+{
+ buddy_free(kmem, addr, order + ilog2(PAGE_SIZE));
+}
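+
+/*
+ * Editor's sketch (not part of the original file): allocating and freeing a
+ * physically contiguous run of pages. An order of 2 requests 2^2 = 4 pages;
+ * the same order must be passed back to kmem_free_pages().
+ *
+ *     void *pages = kmem_get_pages(2);   // 4 contiguous, zeroed pages
+ *     if (!pages)
+ *             return -ENOMEM;
+ *     kmem_free_pages(pages, 2);
+ */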
+
+
--- /dev/null
+/* Copyright (c) 2008, Sandia National Laboratories */
+
+#include <lwk/kernel.h>
+#include <lwk/spinlock.h>
+#include <lwk/string.h>
+#include <lwk/list.h>
+#include <lwk/log2.h>
+#include <lwk/pmem.h>
+#include <arch/uaccess.h>
+
+static LIST_HEAD(pmem_list);
+static DEFINE_SPINLOCK(pmem_list_lock);
+
+struct pmem_list_entry {
+ struct list_head link;
+ struct pmem_region rgn;
+};
+
+static struct pmem_list_entry *
+alloc_pmem_list_entry(void)
+{
+ return kmem_alloc(sizeof(struct pmem_list_entry));
+}
+
+static void
+free_pmem_list_entry(struct pmem_list_entry *entry)
+{
+ kmem_free(entry);
+}
+
+static bool
+calc_overlap(const struct pmem_region *a, const struct pmem_region *b,
+ struct pmem_region *dst)
+{
+ if (!((a->start < b->end) && (a->end > b->start)))
+ return false;
+
+ if (dst) {
+ dst->start = max(a->start, b->start);
+ dst->end = min(a->end, b->end);
+ }
+
+ return true;
+}
+
+static bool
+regions_overlap(const struct pmem_region *a, const struct pmem_region *b)
+{
+ return calc_overlap(a, b, NULL);
+}
+
+static bool
+region_is_unique(const struct pmem_region *rgn)
+{
+ struct pmem_list_entry *entry;
+
+ list_for_each_entry(entry, &pmem_list, link) {
+ if (regions_overlap(rgn, &entry->rgn))
+ return false;
+ }
+ return true;
+}
+
+static bool
+region_is_sane(const struct pmem_region *rgn)
+{
+ int i;
+
+ if (!rgn)
+ return false;
+
+ if (rgn->end <= rgn->start)
+ return false;
+
+ for (i = 0; i < sizeof(rgn->name); i++) {
+ if (rgn->name[i] == '\0')
+ break;
+ }
+ if (i == sizeof(rgn->name))
+ return false;
+
+ return true;
+}
+
+static bool
+region_is_known(const struct pmem_region *rgn)
+{
+ struct pmem_list_entry *entry;
+ struct pmem_region overlap;
+ size_t size;
+
+ size = rgn->end - rgn->start;
+ list_for_each_entry(entry, &pmem_list, link) {
+ if (!calc_overlap(rgn, &entry->rgn, &overlap))
+ continue;
+
+ size -= (overlap.end - overlap.start);
+ }
+
+ return (size == 0) ? true : false;
+}
+
+static void
+insert_pmem_list_entry(struct pmem_list_entry *entry)
+{
+ struct list_head *pos;
+ struct pmem_list_entry *cur;
+
+ /* Locate the entry that the new entry should be inserted before */
+ list_for_each(pos, &pmem_list) {
+ cur = list_entry(pos, struct pmem_list_entry, link);
+ if (cur->rgn.start > entry->rgn.start)
+ break;
+ }
+ list_add_tail(&entry->link, pos);
+}
+
+static bool
+regions_are_mergeable(const struct pmem_region *a, const struct pmem_region *b)
+{
+ if ((a->end != b->start) && (b->end != a->start))
+ return false;
+
+ if (a->type_is_set != b->type_is_set)
+ return false;
+ if (a->type_is_set && (a->type != b->type))
+ return false;
+
+ if (a->lgroup_is_set != b->lgroup_is_set)
+ return false;
+ if (a->lgroup_is_set && (a->lgroup != b->lgroup))
+ return false;
+
+ if (a->allocated_is_set != b->allocated_is_set)
+ return false;
+ if (a->allocated_is_set && (a->allocated != b->allocated))
+ return false;
+
+ if (a->name_is_set != b->name_is_set)
+ return false;
+ if (a->name_is_set && strcmp(a->name, b->name))
+ return false;
+
+ return true;
+}
+
+static bool
+region_matches(const struct pmem_region *query, const struct pmem_region *rgn)
+{
+ if (!regions_overlap(query, rgn))
+ return false;
+
+ if (query->type_is_set
+ && (!rgn->type_is_set || (rgn->type != query->type)))
+ return false;
+
+ if (query->lgroup_is_set
+ && (!rgn->lgroup_is_set || (rgn->lgroup != query->lgroup)))
+ return false;
+
+ if (query->allocated_is_set
+ && (!rgn->allocated_is_set || (rgn->allocated != query->allocated)))
+ return false;
+
+ if (query->name_is_set
+ && (!rgn->name_is_set || strcmp(rgn->name, query->name)))
+ return false;
+
+ return true;
+}
+
+static void
+merge_pmem_list(void)
+{
+ struct pmem_list_entry *entry, *prev, *tmp;
+
+ prev = NULL;
+ list_for_each_entry_safe(entry, tmp, &pmem_list, link) {
+ if (prev && regions_are_mergeable(&prev->rgn, &entry->rgn)) {
+ prev->rgn.end = entry->rgn.end;
+ list_del(&entry->link);
+ free_pmem_list_entry(entry);
+ } else {
+ prev = entry;
+ }
+ }
+}
+
+static void
+zero_pmem(const struct pmem_region *rgn)
+{
+ /* access pmem region via the kernel's identity map */
+ memset(__va(rgn->start), 0, rgn->end - rgn->start);
+}
+
+static int
+__pmem_add(const struct pmem_region *rgn)
+{
+ struct pmem_list_entry *entry;
+
+ if (!region_is_sane(rgn))
+ return -EINVAL;
+
+ if (!region_is_unique(rgn))
+ return -EEXIST;
+
+ if (!(entry = alloc_pmem_list_entry()))
+ return -ENOMEM;
+
+ entry->rgn = *rgn;
+
+ insert_pmem_list_entry(entry);
+ merge_pmem_list();
+
+ return 0;
+}
+
+int
+pmem_add(const struct pmem_region *rgn)
+{
+ int status;
+ unsigned long irqstate;
+
+ spin_lock_irqsave(&pmem_list_lock, irqstate);
+ status = __pmem_add(rgn);
+ spin_unlock_irqrestore(&pmem_list_lock, irqstate);
+
+ return status;
+}
+
+int
+sys_pmem_add(const struct pmem_region __user *rgn)
+{
+ struct pmem_region _rgn;
+
+ if (current->uid != 0)
+ return -EPERM;
+
+ if (copy_from_user(&_rgn, rgn, sizeof(_rgn)))
+ return -EINVAL;
+
+ return pmem_add(&_rgn);
+}
+
+static int
+__pmem_update(const struct pmem_region *update, bool umem_only)
+{
+ struct pmem_list_entry *entry, *head, *tail;
+ struct pmem_region overlap;
+
+ if (!region_is_sane(update))
+ return -EINVAL;
+
+ if (!region_is_known(update))
+ return -ENOENT;
+
+ list_for_each_entry(entry, &pmem_list, link) {
+ if (!calc_overlap(update, &entry->rgn, &overlap))
+ continue;
+
+ /* Jail user-space to PMEM_TYPE_UMEM regions */
+ if (umem_only) {
+ if (!entry->rgn.type_is_set
+ || (entry->rgn.type != PMEM_TYPE_UMEM))
+ return -EPERM;
+ if (!update->type_is_set
+ || (update->type != PMEM_TYPE_UMEM))
+ return -EPERM;
+ }
+
+ /* Handle head of entry non-overlap */
+ if (entry->rgn.start < overlap.start) {
+ if (!(head = alloc_pmem_list_entry()))
+ return -ENOMEM;
+ head->rgn = entry->rgn;
+ head->rgn.end = overlap.start;
+ list_add_tail(&head->link, &entry->link);
+ }
+
+ /* Handle tail of entry non-overlap */
+ if (entry->rgn.end > overlap.end) {
+ if (!(tail = alloc_pmem_list_entry()))
+ return -ENOMEM;
+ tail->rgn = entry->rgn;
+ tail->rgn.start = overlap.end;
+ list_add(&tail->link, &entry->link);
+ }
+
+ /* Update entry to reflect the overlap */
+ entry->rgn = *update;
+ entry->rgn.start = overlap.start;
+ entry->rgn.end = overlap.end;
+ }
+
+ merge_pmem_list();
+
+ return 0;
+}
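+
+/*
+ * Editor's note (illustrative, not part of the original file): __pmem_update()
+ * may split an existing list entry into up to three pieces. With a
+ * hypothetical entry [0x0000, 0x4000) and an update covering [0x1000, 0x3000):
+ *
+ *     before:  [0x0000                  0x4000)   original attributes
+ *     after:   [0x0000  0x1000)                   head, original attributes
+ *              [0x1000  0x3000)                   overlap, updated attributes
+ *              [0x3000           0x4000)          tail, original attributes
+ */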
+
+static int
+_pmem_update(const struct pmem_region *update, bool umem_only)
+{
+ int status;
+ unsigned long irqstate;
+
+ spin_lock_irqsave(&pmem_list_lock, irqstate);
+ status = __pmem_update(update, umem_only);
+ spin_unlock_irqrestore(&pmem_list_lock, irqstate);
+
+ return status;
+}
+
+int
+pmem_update(const struct pmem_region *update)
+{
+ return _pmem_update(update, false);
+}
+
+int
+sys_pmem_update(const struct pmem_region __user *update)
+{
+ struct pmem_region _update;
+
+ if (current->uid != 0)
+ return -EPERM;
+
+ if (copy_from_user(&_update, update, sizeof(_update)))
+ return -EINVAL;
+
+ return _pmem_update(&_update, true);
+}
+
+static int
+__pmem_query(const struct pmem_region *query, struct pmem_region *result)
+{
+ struct pmem_list_entry *entry;
+ struct pmem_region *rgn;
+
+ if (!region_is_sane(query))
+ return -EINVAL;
+
+ list_for_each_entry(entry, &pmem_list, link) {
+ rgn = &entry->rgn;
+ if (!region_matches(query, rgn))
+ continue;
+
+ /* match found, update result */
+ if (result) {
+ *result = *rgn;
+ calc_overlap(query, rgn, result);
+ }
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+int
+pmem_query(const struct pmem_region *query, struct pmem_region *result)
+{
+ int status;
+ unsigned long irqstate;
+
+ spin_lock_irqsave(&pmem_list_lock, irqstate);
+ status = __pmem_query(query, result);
+ spin_unlock_irqrestore(&pmem_list_lock, irqstate);
+
+ return status;
+}
+
+int
+sys_pmem_query(const struct pmem_region __user *query,
+ struct pmem_region __user *result)
+{
+ struct pmem_region _query, _result;
+ int status;
+
+ if (current->uid != 0)
+ return -EPERM;
+
+ if (copy_from_user(&_query, query, sizeof(_query)))
+ return -EINVAL;
+
+ if ((status = pmem_query(&_query, &_result)) != 0)
+ return status;
+
+ if (result && copy_to_user(result, &_result, sizeof(*result)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+__pmem_alloc(size_t size, size_t alignment,
+ const struct pmem_region *constraint,
+ struct pmem_region *result)
+{
+ int status;
+ struct pmem_region query;
+ struct pmem_region candidate;
+
+ if (size == 0)
+ return -EINVAL;
+
+ if (alignment && !is_power_of_2(alignment))
+ return -EINVAL;
+
+ if (!region_is_sane(constraint))
+ return -EINVAL;
+
+ if (constraint->allocated_is_set && constraint->allocated)
+ return -EINVAL;
+
+ query = *constraint;
+
+ while ((status = __pmem_query(&query, &candidate)) == 0) {
+ if (alignment) {
+ candidate.start = round_up(candidate.start, alignment);
+ if (candidate.start >= candidate.end)
+ continue;
+ }
+
+ if ((candidate.end - candidate.start) >= size) {
+ candidate.end = candidate.start + size;
+ candidate.allocated_is_set = true;
+ candidate.allocated = true;
+ status = __pmem_update(&candidate, false);
+ BUG_ON(status);
+ zero_pmem(&candidate);
+ if (result)
+ *result = candidate;
+ return 0;
+ }
+
+ query.start = candidate.end;
+ }
+ BUG_ON(status != -ENOENT);
+
+ return -ENOMEM;
+}
+
+int
+pmem_alloc(size_t size, size_t alignment,
+ const struct pmem_region *constraint,
+ struct pmem_region *result)
+{
+ int status;
+ unsigned long irqstate;
+
+ spin_lock_irqsave(&pmem_list_lock, irqstate);
+ status = __pmem_alloc(size, alignment, constraint, result);
+ spin_unlock_irqrestore(&pmem_list_lock, irqstate);
+
+ return status;
+}
+
+int
+sys_pmem_alloc(size_t size, size_t alignment,
+ const struct pmem_region __user *constraint,
+ struct pmem_region __user *result)
+{
+ struct pmem_region _constraint, _result;
+ int status;
+
+ if (current->uid != 0)
+ return -EPERM;
+
+ if (copy_from_user(&_constraint, constraint, sizeof(_constraint)))
+ return -EINVAL;
+
+ if ((status = pmem_alloc(size, alignment, &_constraint, &_result)) != 0)
+ return status;
+
+ if (result && copy_to_user(result, &_result, sizeof(*result)))
+ return -EINVAL;
+
+ return 0;
+}
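+
+/*
+ * Editor's sketch (not part of the original file): allocating one page of
+ * user-managed physical memory with pmem_alloc(). The constraint restricts
+ * the search to unallocated PMEM_TYPE_UMEM memory; the 4 GB limit is a
+ * hypothetical example value.
+ *
+ *     struct pmem_region constraint, result;
+ *
+ *     pmem_region_unset_all(&constraint);
+ *     constraint.start            = 0;
+ *     constraint.end              = 0x100000000UL;
+ *     constraint.type_is_set      = true;
+ *     constraint.type             = PMEM_TYPE_UMEM;
+ *     constraint.allocated_is_set = true;
+ *     constraint.allocated        = false;
+ *
+ *     if (pmem_alloc(PAGE_SIZE, PAGE_SIZE, &constraint, &result) == 0) {
+ *             // result.start holds the physical address of the zeroed page
+ *     }
+ */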
--- /dev/null
+../user/liblwk/pmem.c
\ No newline at end of file
--- /dev/null
+####
+# kbuild: Generic definitions
+
+# Convenient variables
+comma := ,
+squote := '
+empty :=
+space := $(empty) $(empty)
+
+###
+# The temporary file to save gcc -MD generated dependencies must not
+# contain a comma
+depfile = $(subst $(comma),_,$(@D)/.$(@F).d)
+
+###
+# Escape single quote for use in echo statements
+escsq = $(subst $(squote),'\$(squote)',$1)
+
+###
+# filechk is used to check whether the content of a generated file needs to be updated.
+# Sample usage:
+# define filechk_sample
+# echo $KERNELRELEASE
+# endef
+# version.h : Makefile
+# $(call filechk,sample)
+# The rule defined shall write to stdout the content of the new file.
+# The existing file will be compared with the new one.
+# - If no file exists it is created
+# - If the content differs the new file is used
+# - If they are equal no change is made and no timestamp is updated
+# - stdin is piped in from the first prerequisite ($<) so one has
+# to specify a valid file as first prerequisite (often the kbuild file)
+define filechk
+ $(Q)set -e; \
+ echo ' CHK $@'; \
+ mkdir -p $(dir $@); \
+ $(filechk_$(1)) < $< > $@.tmp; \
+ if [ -r $@ ] && cmp -s $@ $@.tmp; then \
+ rm -f $@.tmp; \
+ else \
+ echo ' UPD $@'; \
+ mv -f $@.tmp $@; \
+ fi
+endef
+
+######
+# gcc support functions
+# See documentation in Documentation/kbuild/makefiles.txt
+
+# as-option
+# Usage: cflags-y += $(call as-option, -Wa$(comma)-isa=foo,)
+
+as-option = $(shell if $(CC) $(CFLAGS) $(1) -Wa,-Z -c -o /dev/null \
+ -xassembler /dev/null > /dev/null 2>&1; then echo "$(1)"; \
+ else echo "$(2)"; fi ;)
+
+# cc-option
+# Usage: cflags-y += $(call cc-option, -march=winchip-c6, -march=i586)
+
+cc-option = $(shell if $(CC) $(CFLAGS) $(1) -S -o /dev/null -xc /dev/null \
+ > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi ;)
+
+# cc-option-yn
+# Usage: flag := $(call cc-option-yn, -march=winchip-c6)
+cc-option-yn = $(shell if $(CC) $(CFLAGS) $(1) -S -o /dev/null -xc /dev/null \
+ > /dev/null 2>&1; then echo "y"; else echo "n"; fi;)
+
+# cc-option-align
+# Prefix align with either -falign or -malign
+cc-option-align = $(subst -functions=0,,\
+ $(call cc-option,-falign-functions=0,-malign-functions=0))
+
+# cc-version
+# Usage gcc-ver := $(call cc-version, $(CC))
+cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh \
+ $(if $(1), $(1), $(CC)))
+
+# cc-ifversion
+# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
+cc-ifversion = $(shell if [ $(call cc-version, $(CC)) $(1) $(2) ]; then \
+ echo $(3); fi;)
+
+###
+# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.build obj=
+# Usage:
+# $(Q)$(MAKE) $(build)=dir
+build := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.build obj
+
+# Prefix -I with $(srctree) if it is not an absolute path
+addtree = $(if $(filter-out -I/%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1))) $(1)
+# Find all -I options and call addtree
+flags = $(foreach o,$($(1)),$(if $(filter -I%,$(o)),$(call addtree,$(o)),$(o)))
+
+# If quiet is set, only print short version of command
+cmd = @$(echo-cmd) $(cmd_$(1))
+
+# Add $(obj)/ to paths that are not absolute
+objectify = $(foreach o,$(1),$(if $(filter /%,$(o)),$(o),$(obj)/$(o)))
+
+###
+# if_changed - execute command if any prerequisite is newer than
+# target, or command line has changed
+# if_changed_dep - as if_changed, but uses fixdep to reveal dependencies
+# including used config symbols
+# if_changed_rule - as if_changed but execute rule instead
+# See Documentation/kbuild/makefiles.txt for more info
+
+ifneq ($(KBUILD_NOCMDDEP),1)
+# Check if both arguments are the same. Result is an empty string if they are equal
+# User may override this check using make KBUILD_NOCMDDEP=1
+arg-check = $(strip $(filter-out $(1), $(2)) $(filter-out $(2), $(1)) )
+endif
+
+# echo command. Short version is $(quiet) equals quiet, otherwise full command
+echo-cmd = $(if $($(quiet)cmd_$(1)), \
+ echo ' $(call escsq,$($(quiet)cmd_$(1)))';)
+
+make-cmd = $(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1)))))
+
+# function to only execute the passed command if necessary
+# >'< substitution is for echo to work, >$< substitution to preserve $ when reloading .cmd file
+# note: when using inline perl scripts [perl -e '...$$t=1;...'] in $(cmd_xxx) double $$ your perl vars
+#
+if_changed = $(if $(strip $(filter-out $(PHONY),$?) \
+ $(call arg-check, $(cmd_$(1)), $(cmd_$@)) ), \
+ @set -e; \
+ $(echo-cmd) $(cmd_$(1)); \
+ echo 'cmd_$@ := $(make-cmd)' > $(@D)/.$(@F).cmd)
+
+# execute the command and also postprocess generated .d dependencies
+# file
+if_changed_dep = $(if $(strip $(filter-out $(PHONY),$?) \
+ $(filter-out FORCE $(wildcard $^),$^) \
+ $(call arg-check, $(cmd_$(1)), $(cmd_$@)) ), \
+ @set -e; \
+ $(echo-cmd) $(cmd_$(1)); \
+ scripts/basic/fixdep $(depfile) $@ '$(make-cmd)' > $(@D)/.$(@F).tmp; \
+ rm -f $(depfile); \
+ mv -f $(@D)/.$(@F).tmp $(@D)/.$(@F).cmd)
+
+# Usage: $(call if_changed_rule,foo)
+# will check if $(cmd_foo) changed, or any of the prerequisites changed,
+# and if so will execute $(rule_foo)
+if_changed_rule = $(if $(strip $(filter-out $(PHONY),$?) \
+ $(call arg-check, $(cmd_$(1)), $(cmd_$@)) ),\
+ @set -e; \
+ $(rule_$(1)))
--- /dev/null
+###
+# scripts contains sources for various helper programs used throughout
+# the kernel for the build process.
+# ---------------------------------------------------------------------------
+# kallsyms: Find all symbols in vmlinux
+# pnmtologo: Convert pnm files to logo files
+# conmakehash: Create chartable
+# conmakehash: Create arrays for initializing the kernel console tables
+
+hostprogs-$(CONFIG_KALLSYMS) += kallsyms
+hostprogs-$(CONFIG_LOGO) += pnmtologo
+hostprogs-$(CONFIG_VT) += conmakehash
+hostprogs-$(CONFIG_PROM_CONSOLE) += conmakehash
+hostprogs-$(CONFIG_IKCONFIG) += bin2c
+
+always := $(hostprogs-y)
+
+#subdir-$(CONFIG_MODVERSIONS) += genksyms
+#subdir-$(CONFIG_MODULES) += mod
+
+# Let clean descend into subdirs
+subdir- += basic kconfig
--- /dev/null
+# ==========================================================================
+# Building
+# ==========================================================================
+
+src := $(obj)
+
+PHONY := __build
+__build:
+
+# Read .config if it exists, otherwise ignore
+-include .config
+
+include scripts/Kbuild.include
+
+# The filename Kbuild has precedence over Makefile
+kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
+include $(if $(wildcard $(kbuild-dir)/Kbuild), $(kbuild-dir)/Kbuild, $(kbuild-dir)/Makefile)
+
+include scripts/Makefile.lib
+
+ifdef host-progs
+ifneq ($(hostprogs-y),$(host-progs))
+$(warning kbuild: $(obj)/Makefile - Usage of host-progs is deprecated. Please replace with hostprogs-y!)
+hostprogs-y += $(host-progs)
+endif
+endif
+
+# Do not include host rules unless needed
+ifneq ($(hostprogs-y)$(hostprogs-m),)
+include scripts/Makefile.host
+endif
+
+ifneq ($(KBUILD_SRC),)
+# Create output directory if not already present
+_dummy := $(shell [ -d $(obj) ] || mkdir -p $(obj))
+
+# Create directories for object files if directory does not exist
+# Needed when obj-y := dir/file.o syntax is used
+_dummy := $(foreach d,$(obj-dirs), $(shell [ -d $(d) ] || mkdir -p $(d)))
+endif
+
+
+ifdef EXTRA_TARGETS
+$(warning kbuild: $(obj)/Makefile - Usage of EXTRA_TARGETS is obsolete in 2.6. Please fix!)
+endif
+
+ifdef build-targets
+$(warning kbuild: $(obj)/Makefile - Usage of build-targets is obsolete in 2.6. Please fix!)
+endif
+
+ifdef export-objs
+$(warning kbuild: $(obj)/Makefile - Usage of export-objs is obsolete in 2.6. Please fix!)
+endif
+
+ifdef O_TARGET
+$(warning kbuild: $(obj)/Makefile - Usage of O_TARGET := $(O_TARGET) is obsolete in 2.6. Please fix!)
+endif
+
+ifdef L_TARGET
+$(error kbuild: $(obj)/Makefile - Use of L_TARGET is replaced by lib-y in 2.6. Please fix!)
+endif
+
+ifdef list-multi
+$(warning kbuild: $(obj)/Makefile - list-multi := $(list-multi) is obsolete in 2.6. Please fix!)
+endif
+
+ifndef obj
+$(warning kbuild: Makefile.build is included improperly)
+endif
+
+# ===========================================================================
+
+ifneq ($(strip $(lib-y) $(lib-m) $(lib-n) $(lib-)),)
+lib-target := $(obj)/lib.a
+endif
+
+ifneq ($(strip $(obj-y) $(obj-m) $(obj-n) $(obj-) $(lib-target)),)
+builtin-target := $(obj)/built-in.o
+endif
+
+# We keep a list of all modules in $(MODVERDIR)
+
+__build: $(if $(KBUILD_BUILTIN),$(builtin-target) $(lib-target) $(extra-y)) \
+ $(if $(KBUILD_MODULES),$(obj-m)) \
+ $(subdir-ym) $(always)
+ @:
+
+# Linus' kernel sanity checking tool
+ifneq ($(KBUILD_CHECKSRC),0)
+ ifeq ($(KBUILD_CHECKSRC),2)
+ quiet_cmd_force_checksrc = CHECK $<
+ cmd_force_checksrc = $(CHECK) $(CHECKFLAGS) $(c_flags) $< ;
+ else
+ quiet_cmd_checksrc = CHECK $<
+ cmd_checksrc = $(CHECK) $(CHECKFLAGS) $(c_flags) $< ;
+ endif
+endif
+
+
+# Compile C sources (.c)
+# ---------------------------------------------------------------------------
+
+# Default is built-in, unless we know otherwise
+modkern_cflags := $(CFLAGS_KERNEL)
+quiet_modtag := $(empty) $(empty)
+
+$(real-objs-m) : modkern_cflags := $(CFLAGS_MODULE)
+$(real-objs-m:.o=.i) : modkern_cflags := $(CFLAGS_MODULE)
+$(real-objs-m:.o=.s) : modkern_cflags := $(CFLAGS_MODULE)
+$(real-objs-m:.o=.lst): modkern_cflags := $(CFLAGS_MODULE)
+
+$(real-objs-m) : quiet_modtag := [M]
+$(real-objs-m:.o=.i) : quiet_modtag := [M]
+$(real-objs-m:.o=.s) : quiet_modtag := [M]
+$(real-objs-m:.o=.lst): quiet_modtag := [M]
+
+$(obj-m) : quiet_modtag := [M]
+
+# Default for not multi-part modules
+modname = $(*F)
+
+$(multi-objs-m) : modname = $(modname-multi)
+$(multi-objs-m:.o=.i) : modname = $(modname-multi)
+$(multi-objs-m:.o=.s) : modname = $(modname-multi)
+$(multi-objs-m:.o=.lst) : modname = $(modname-multi)
+$(multi-objs-y) : modname = $(modname-multi)
+$(multi-objs-y:.o=.i) : modname = $(modname-multi)
+$(multi-objs-y:.o=.s) : modname = $(modname-multi)
+$(multi-objs-y:.o=.lst) : modname = $(modname-multi)
+
+quiet_cmd_cc_s_c = CC $(quiet_modtag) $@
+cmd_cc_s_c = $(CC) $(c_flags) -fverbose-asm -S -o $@ $<
+
+%.s: %.c FORCE
+ $(call if_changed_dep,cc_s_c)
+
+quiet_cmd_cc_i_c = CPP $(quiet_modtag) $@
+cmd_cc_i_c = $(CPP) $(c_flags) -o $@ $<
+
+%.i: %.c FORCE
+ $(call if_changed_dep,cc_i_c)
+
+# C (.c) files
+# The C file is compiled and updated dependency information is generated.
+# (See cmd_cc_o_c + relevant part of rule_cc_o_c)
+
+quiet_cmd_cc_o_c = CC $(quiet_modtag) $@
+
+ifndef CONFIG_MODVERSIONS
+cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
+
+else
+# When module versioning is enabled the following steps are executed:
+# o compile a .tmp_<file>.o from <file>.c
+# o if .tmp_<file>.o doesn't contain a __ksymtab version, i.e. does
+# not export symbols, we just rename .tmp_<file>.o to <file>.o and
+# are done.
+# o otherwise, we calculate symbol versions using the good old
+# genksyms on the preprocessed source and postprocess them in a way
+# that they are usable as a linker script
+# o generate <file>.o from .tmp_<file>.o using the linker to
+# replace the unresolved symbols __crc_exported_symbol with
+# the actual value of the checksum generated by genksyms
+
+cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $<
+cmd_modversions = \
+ if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \
+ $(CPP) -D__GENKSYMS__ $(c_flags) $< \
+ | $(GENKSYMS) -a $(ARCH) \
+ > $(@D)/.tmp_$(@F:.o=.ver); \
+ \
+ $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \
+ -T $(@D)/.tmp_$(@F:.o=.ver); \
+ rm -f $(@D)/.tmp_$(@F) $(@D)/.tmp_$(@F:.o=.ver); \
+ else \
+ mv -f $(@D)/.tmp_$(@F) $@; \
+ fi;
+endif
+
+define rule_cc_o_c
+ $(call echo-cmd,checksrc) $(cmd_checksrc) \
+ $(call echo-cmd,cc_o_c) $(cmd_cc_o_c); \
+ $(cmd_modversions) \
+ scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,cc_o_c)' > $(@D)/.$(@F).tmp; \
+ rm -f $(depfile); \
+ mv -f $(@D)/.$(@F).tmp $(@D)/.$(@F).cmd
+endef
+
+# Built-in and composite module parts
+
+%.o: %.c FORCE
+ $(call cmd,force_checksrc)
+ $(call if_changed_rule,cc_o_c)
+
+# Single-part modules are special since we need to mark them in $(MODVERDIR)
+
+$(single-used-m): %.o: %.c FORCE
+ $(call cmd,force_checksrc)
+ $(call if_changed_rule,cc_o_c)
+ @{ echo $(@:.o=.ko); echo $@; } > $(MODVERDIR)/$(@F:.o=.mod)
+
+quiet_cmd_cc_lst_c = MKLST $@
+ cmd_cc_lst_c = $(CC) $(c_flags) -g -c -o $*.o $< && \
+ $(CONFIG_SHELL) $(srctree)/scripts/makelst $*.o \
+ System.map $(OBJDUMP) > $@
+
+%.lst: %.c FORCE
+ $(call if_changed_dep,cc_lst_c)
+
+# Compile assembler sources (.S)
+# ---------------------------------------------------------------------------
+
+modkern_aflags := $(AFLAGS_KERNEL)
+
+$(real-objs-m) : modkern_aflags := $(AFLAGS_MODULE)
+$(real-objs-m:.o=.s): modkern_aflags := $(AFLAGS_MODULE)
+
+quiet_cmd_as_s_S = CPP $(quiet_modtag) $@
+cmd_as_s_S = $(CPP) $(a_flags) -o $@ $<
+
+%.s: %.S FORCE
+ $(call if_changed_dep,as_s_S)
+
+quiet_cmd_as_o_S = AS $(quiet_modtag) $@
+cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
+
+%.o: %.S FORCE
+ $(call if_changed_dep,as_o_S)
+
+targets += $(real-objs-y) $(real-objs-m) $(lib-y)
+targets += $(extra-y) $(MAKECMDGOALS) $(always)
+
+# Linker scripts preprocessor (.lds.S -> .lds)
+# ---------------------------------------------------------------------------
+quiet_cmd_cpp_lds_S = LDS $@
+ cmd_cpp_lds_S = $(CPP) $(cpp_flags) -D__ASSEMBLY__ -o $@ $<
+
+%.lds: %.lds.S FORCE
+ $(call if_changed_dep,cpp_lds_S)
+
+# Build the compiled-in targets
+# ---------------------------------------------------------------------------
+
+# To build objects in subdirs, we need to descend into the directories
+$(sort $(subdir-obj-y)): $(subdir-ym) ;
+
+#
+# Rule to compile a set of .o files into one .o file
+#
+ifdef builtin-target
+quiet_cmd_link_o_target = LD $@
+# If the list of objects to link is empty, just create an empty built-in.o
+cmd_link_o_target = $(if $(strip $(obj-y)),\
+ $(LD) $(ld_flags) -r -o $@ $(filter $(obj-y), $^),\
+ rm -f $@; $(AR) rcs $@)
+
+$(builtin-target): $(obj-y) FORCE
+ $(call if_changed,link_o_target)
+
+targets += $(builtin-target)
+endif # builtin-target
+
+#
+# Rule to compile a set of .o files into one .a file
+#
+ifdef lib-target
+quiet_cmd_link_l_target = AR $@
+cmd_link_l_target = rm -f $@; $(AR) $(EXTRA_ARFLAGS) rcs $@ $(lib-y)
+
+$(lib-target): $(lib-y) FORCE
+ $(call if_changed,link_l_target)
+
+targets += $(lib-target)
+endif
+
+#
+# Rule to link composite objects
+#
+# Composite objects are specified in kbuild makefile as follows:
+# <composite-object>-objs := <list of .o files>
+# or
+# <composite-object>-y := <list of .o files>
+link_multi_deps = \
+$(filter $(addprefix $(obj)/, \
+$($(subst $(obj)/,,$(@:.o=-objs))) \
+$($(subst $(obj)/,,$(@:.o=-y)))), $^)
+
+quiet_cmd_link_multi-y = LD $@
+cmd_link_multi-y = $(LD) $(ld_flags) -r -o $@ $(link_multi_deps)
+
+quiet_cmd_link_multi-m = LD [M] $@
+cmd_link_multi-m = $(LD) $(ld_flags) $(LDFLAGS_MODULE) -o $@ $(link_multi_deps)
+
+# We would rather have a list of rules like
+# foo.o: $(foo-objs)
+# but that's not so easy, so instead we make all composite objects depend
+# on the set of all their parts
+$(multi-used-y) : %.o: $(multi-objs-y) FORCE
+ $(call if_changed,link_multi-y)
+
+$(multi-used-m) : %.o: $(multi-objs-m) FORCE
+ $(call if_changed,link_multi-m)
+ @{ echo $(@:.o=.ko); echo $(link_multi_deps); } > $(MODVERDIR)/$(@F:.o=.mod)
+
+targets += $(multi-used-y) $(multi-used-m)
+
+
+# Descending
+# ---------------------------------------------------------------------------
+
+PHONY += $(subdir-ym)
+$(subdir-ym):
+ $(Q)$(MAKE) $(build)=$@
+
+# Add FORCE to the prerequisites of a target to force it to be always rebuilt.
+# ---------------------------------------------------------------------------
+
+PHONY += FORCE
+
+FORCE:
+
+# Read all saved command lines and dependencies for the $(targets) we
+# may be building above, using $(if_changed{,_dep}). As an
+# optimization, we don't need to read them if the target does not
+# exist; we will rebuild anyway in that case.
+
+targets := $(wildcard $(sort $(targets)))
+cmd_files := $(wildcard $(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd))
+
+ifneq ($(cmd_files),)
+ include $(cmd_files)
+endif
+
+
+# Declare the contents of the .PHONY variable as phony. We keep that
+# information in a variable so we can use it in if_changed and friends.
+
+.PHONY: $(PHONY)
--- /dev/null
+# ==========================================================================
+# Cleaning up
+# ==========================================================================
+
+src := $(obj)
+
+PHONY := __clean
+__clean:
+
+# Shorthand for $(Q)$(MAKE) scripts/Makefile.clean obj=dir
+# Usage:
+# $(Q)$(MAKE) $(clean)=dir
+clean := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.clean obj
+
+# The filename Kbuild has precedence over Makefile
+kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
+include $(if $(wildcard $(kbuild-dir)/Kbuild), $(kbuild-dir)/Kbuild, $(kbuild-dir)/Makefile)
+
+# Figure out what we need to build from the various variables
+# ==========================================================================
+
+__subdir-y := $(patsubst %/,%,$(filter %/, $(obj-y)))
+subdir-y += $(__subdir-y)
+__subdir-m := $(patsubst %/,%,$(filter %/, $(obj-m)))
+subdir-m += $(__subdir-m)
+__subdir-n := $(patsubst %/,%,$(filter %/, $(obj-n)))
+subdir-n += $(__subdir-n)
+__subdir- := $(patsubst %/,%,$(filter %/, $(obj-)))
+subdir- += $(__subdir-)
+
+# Subdirectories we need to descend into
+
+subdir-ym := $(sort $(subdir-y) $(subdir-m))
+subdir-ymn := $(sort $(subdir-ym) $(subdir-n) $(subdir-))
+
+# Add subdir path
+
+subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
+
+# build a list of files to remove, usually relative to the current
+# directory
+
+__clean-files := $(extra-y) $(EXTRA_TARGETS) $(always) \
+ $(targets) $(clean-files) \
+ $(host-progs) \
+ $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
+
+# as clean-files is given relative to the current directory, this adds
+# a $(obj) prefix, except for absolute paths
+
+__clean-files := $(wildcard \
+ $(addprefix $(obj)/, $(filter-out /%, $(__clean-files))) \
+ $(filter /%, $(__clean-files)))
+
+# as clean-dirs is given relative to the current directory, this adds
+# a $(obj) prefix, except for absolute paths
+
+__clean-dirs := $(wildcard \
+ $(addprefix $(obj)/, $(filter-out /%, $(clean-dirs))) \
+ $(filter /%, $(clean-dirs)))
+
+# ==========================================================================
+
+quiet_cmd_clean = CLEAN $(obj)
+ cmd_clean = rm -f $(__clean-files)
+quiet_cmd_cleandir = CLEAN $(__clean-dirs)
+ cmd_cleandir = rm -rf $(__clean-dirs)
+
+
+__clean: $(subdir-ymn)
+ifneq ($(strip $(__clean-files)),)
+ +$(call cmd,clean)
+endif
+ifneq ($(strip $(__clean-dirs)),)
+ +$(call cmd,cleandir)
+endif
+ifneq ($(strip $(clean-rule)),)
+ +$(clean-rule)
+endif
+ @:
+
+
+# ===========================================================================
+# Generic stuff
+# ===========================================================================
+
+# Descending
+# ---------------------------------------------------------------------------
+
+PHONY += $(subdir-ymn)
+$(subdir-ymn):
+ $(Q)$(MAKE) $(clean)=$@
+
+# If quiet is set, only print short version of command
+
+cmd = @$(if $($(quiet)cmd_$(1)),echo ' $($(quiet)cmd_$(1))' &&) $(cmd_$(1))
+
+
+# Declare the contents of the .PHONY variable as phony. We keep that
+# information in a variable so we can use it in if_changed and friends.
+
+.PHONY: $(PHONY)
--- /dev/null
+# ==========================================================================
+# Building binaries on the host system
+# Binaries are used during the compilation of the kernel, for example
+# to preprocess a data file.
+#
+# Both C and C++ are supported, but the preferred language for such utilities is C.
+#
+# Sample syntax (see Documentation/kbuild/makefiles.txt for reference)
+# hostprogs-y := bin2hex
+# Will compile bin2hex.c and create an executable named bin2hex
+#
+# hostprogs-y := lxdialog
+# lxdialog-objs := checklist.o lxdialog.o
+# Will compile lxdialog.c and checklist.c, and then link the executable
+# lxdialog, based on checklist.o and lxdialog.o
+#
+# hostprogs-y := qconf
+# qconf-cxxobjs := qconf.o
+# qconf-objs := menu.o
+# Will compile qconf as a C++ program, and menu as a C program.
+# They are linked as C++ code to the executable qconf
+
+# hostprogs-y := conf
+# conf-objs := conf.o libkconfig.so
+# libkconfig-objs := expr.o type.o
+# Will create a shared library named libkconfig.so that consists of
+# expr.o and type.o (they are both compiled as C code and the object files
+# are built as position independent code).
+# conf.c is compiled as a C program, and conf.o is linked together with
+# libkconfig.so as the executable conf.
+# Note: Shared libraries consisting of C++ files are not supported
+
+__hostprogs := $(sort $(hostprogs-y)$(hostprogs-m))
+
+# hostprogs-y := tools/build may have been specified. Retrieve directory
+obj-dirs += $(foreach f,$(__hostprogs), $(if $(dir $(f)),$(dir $(f))))
+obj-dirs := $(strip $(sort $(filter-out ./,$(obj-dirs))))
+
+
+# C code
+# Executables compiled from a single .c file
+host-csingle := $(foreach m,$(__hostprogs),$(if $($(m)-objs),,$(m)))
+
+# C executables linked based on several .o files
+host-cmulti := $(foreach m,$(__hostprogs),\
+ $(if $($(m)-cxxobjs),,$(if $($(m)-objs),$(m))))
+
+# Object (.o) files compiled from .c files
+host-cobjs := $(sort $(foreach m,$(__hostprogs),$($(m)-objs)))
+
+# C++ code
+# C++ executables compiled from at least one .cc file
+# and zero or more .c files
+host-cxxmulti := $(foreach m,$(__hostprogs),$(if $($(m)-cxxobjs),$(m)))
+
+# C++ Object (.o) files compiled from .cc files
+host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
+
+# Shared libraries (only .c supported)
+# Shared libraries (.so) - all .so files referenced in "xxx-objs"
+host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
+# Remove .so files from "xxx-objs"
+host-cobjs := $(filter-out %.so,$(host-cobjs))
+
+# Object (.o) files used by the shared libraries
+host-cshobjs := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
+
+__hostprogs := $(addprefix $(obj)/,$(__hostprogs))
+host-csingle := $(addprefix $(obj)/,$(host-csingle))
+host-cmulti := $(addprefix $(obj)/,$(host-cmulti))
+host-cobjs := $(addprefix $(obj)/,$(host-cobjs))
+host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
+host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs))
+host-cshlib := $(addprefix $(obj)/,$(host-cshlib))
+host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs))
+obj-dirs := $(addprefix $(obj)/,$(obj-dirs))
+
+#####
+# Handle options to gcc. Support building with separate output directory
+
+_hostc_flags = $(HOSTCFLAGS) $(HOST_EXTRACFLAGS) $(HOSTCFLAGS_$(*F).o)
+_hostcxx_flags = $(HOSTCXXFLAGS) $(HOST_EXTRACXXFLAGS) $(HOSTCXXFLAGS_$(*F).o)
+
+ifeq ($(KBUILD_SRC),)
+__hostc_flags = $(_hostc_flags)
+__hostcxx_flags = $(_hostcxx_flags)
+else
+__hostc_flags = -I$(obj) $(call flags,_hostc_flags)
+__hostcxx_flags = -I$(obj) $(call flags,_hostcxx_flags)
+endif
+
+hostc_flags = -Wp,-MD,$(depfile) $(__hostc_flags)
+hostcxx_flags = -Wp,-MD,$(depfile) $(__hostcxx_flags)
+
+#####
+# Compile programs on the host
+
+# Create executable from a single .c file
+# host-csingle -> Executable
+quiet_cmd_host-csingle = HOSTCC $@
+ cmd_host-csingle = $(HOSTCC) $(hostc_flags) -o $@ $< \
+ $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
+$(host-csingle): %: %.c FORCE
+ $(call if_changed_dep,host-csingle)
+
+# Link an executable based on list of .o files, all plain c
+# host-cmulti -> executable
+quiet_cmd_host-cmulti = HOSTLD $@
+ cmd_host-cmulti = $(HOSTCC) $(HOSTLDFLAGS) -o $@ \
+ $(addprefix $(obj)/,$($(@F)-objs)) \
+ $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
+$(host-cmulti): %: $(host-cobjs) $(host-cshlib) FORCE
+ $(call if_changed,host-cmulti)
+
+# Create .o file from a single .c file
+# host-cobjs -> .o
+quiet_cmd_host-cobjs = HOSTCC $@
+ cmd_host-cobjs = $(HOSTCC) $(hostc_flags) -c -o $@ $<
+$(host-cobjs): %.o: %.c FORCE
+ $(call if_changed_dep,host-cobjs)
+
+# Link an executable based on list of .o files, a mixture of .c and .cc
+# host-cxxmulti -> executable
+quiet_cmd_host-cxxmulti = HOSTLD $@
+ cmd_host-cxxmulti = $(HOSTCXX) $(HOSTLDFLAGS) -o $@ \
+ $(foreach o,objs cxxobjs,\
+ $(addprefix $(obj)/,$($(@F)-$(o)))) \
+ $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
+$(host-cxxmulti): %: $(host-cobjs) $(host-cxxobjs) $(host-cshlib) FORCE
+ $(call if_changed,host-cxxmulti)
+
+# Create .o file from a single .cc (C++) file
+quiet_cmd_host-cxxobjs = HOSTCXX $@
+ cmd_host-cxxobjs = $(HOSTCXX) $(hostcxx_flags) -c -o $@ $<
+$(host-cxxobjs): %.o: %.cc FORCE
+ $(call if_changed_dep,host-cxxobjs)
+
+# Compile .c file, create position independent .o file
+# host-cshobjs -> .o
+quiet_cmd_host-cshobjs = HOSTCC -fPIC $@
+ cmd_host-cshobjs = $(HOSTCC) $(hostc_flags) -fPIC -c -o $@ $<
+$(host-cshobjs): %.o: %.c FORCE
+ $(call if_changed_dep,host-cshobjs)
+
+# Link a shared library, based on position independent .o files
+# *.o -> .so shared library (host-cshlib)
+quiet_cmd_host-cshlib = HOSTLD -shared $@
+ cmd_host-cshlib = $(HOSTCC) $(HOSTLDFLAGS) -shared -o $@ \
+ $(addprefix $(obj)/,$($(@F:.so=-objs))) \
+ $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
+$(host-cshlib): %: $(host-cshobjs) FORCE
+ $(call if_changed,host-cshlib)
+
+targets += $(host-csingle) $(host-cmulti) $(host-cobjs)\
+ $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs)
+
--- /dev/null
+# Backward compatibility - to be removed...
+extra-y += $(EXTRA_TARGETS)
+# Figure out what we need to build from the various variables
+# ===========================================================================
+
+# When an object is listed to be built compiled-in and modular,
+# only build the compiled-in version
+
+obj-m := $(filter-out $(obj-y),$(obj-m))
+
+# Libraries are always collected in one lib file.
+# Filter out objects already built-in
+
+lib-y := $(filter-out $(obj-y), $(sort $(lib-y) $(lib-m)))
+
+
+# Handle objects in subdirs
+# ---------------------------------------------------------------------------
+# o if we encounter foo/ in $(obj-y), replace it by foo/built-in.o
+# and add the directory to the list of dirs to descend into: $(subdir-y)
+# o if we encounter foo/ in $(obj-m), remove it from $(obj-m)
+# and add the directory to the list of dirs to descend into: $(subdir-m)
+
+__subdir-y := $(patsubst %/,%,$(filter %/, $(obj-y)))
+subdir-y += $(__subdir-y)
+__subdir-m := $(patsubst %/,%,$(filter %/, $(obj-m)))
+subdir-m += $(__subdir-m)
+obj-y := $(patsubst %/, %/built-in.o, $(obj-y))
+obj-m := $(filter-out %/, $(obj-m))
+
+# Subdirectories we need to descend into
+
+subdir-ym := $(sort $(subdir-y) $(subdir-m))
+
+# if $(foo-objs) exists, foo.o is a composite object
+multi-used-y := $(sort $(foreach m,$(obj-y), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))), $(m))))
+multi-used-m := $(sort $(foreach m,$(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))), $(m))))
+multi-used := $(multi-used-y) $(multi-used-m)
+single-used-m := $(sort $(filter-out $(multi-used-m),$(obj-m)))
+
+# Build list of the parts of our composite objects, our composite
+# objects depend on those (obviously)
+multi-objs-y := $(foreach m, $(multi-used-y), $($(m:.o=-objs)) $($(m:.o=-y)))
+multi-objs-m := $(foreach m, $(multi-used-m), $($(m:.o=-objs)) $($(m:.o=-y)))
+multi-objs := $(multi-objs-y) $(multi-objs-m)
+
+# $(subdir-obj-y) is the list of objects in $(obj-y) which do not live
+# in the local directory
+subdir-obj-y := $(foreach o,$(obj-y),$(if $(filter-out $(o),$(notdir $(o))),$(o)))
+
+# $(obj-dirs) is a list of directories that contain object files
+obj-dirs := $(dir $(multi-objs) $(subdir-obj-y))
+
+# Replace multi-part objects by their individual parts, look at local dir only
+real-objs-y := $(foreach m, $(filter-out $(subdir-obj-y), $(obj-y)), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m))) $(extra-y)
+real-objs-m := $(foreach m, $(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m)))
+
+# Add subdir path
+
+extra-y := $(addprefix $(obj)/,$(extra-y))
+always := $(addprefix $(obj)/,$(always))
+targets := $(addprefix $(obj)/,$(targets))
+obj-y := $(addprefix $(obj)/,$(obj-y))
+obj-m := $(addprefix $(obj)/,$(obj-m))
+lib-y := $(addprefix $(obj)/,$(lib-y))
+subdir-obj-y := $(addprefix $(obj)/,$(subdir-obj-y))
+real-objs-y := $(addprefix $(obj)/,$(real-objs-y))
+real-objs-m := $(addprefix $(obj)/,$(real-objs-m))
+single-used-m := $(addprefix $(obj)/,$(single-used-m))
+multi-used-y := $(addprefix $(obj)/,$(multi-used-y))
+multi-used-m := $(addprefix $(obj)/,$(multi-used-m))
+multi-objs-y := $(addprefix $(obj)/,$(multi-objs-y))
+multi-objs-m := $(addprefix $(obj)/,$(multi-objs-m))
+subdir-ym := $(addprefix $(obj)/,$(subdir-ym))
+obj-dirs := $(addprefix $(obj)/,$(obj-dirs))
+
+# These flags are needed for modversions and compiling, so we define them here
+# already
+# $(modname_flags) #defines KBUILD_MODNAME as the name of the module it will
+# end up in (or would, if it gets compiled in)
+# Note: It's possible that one object gets linked into more
+# than one module. In that case KBUILD_MODNAME will be set to foo_bar,
+# where foo and bar are the names of the modules.
+name-fix = $(subst $(comma),_,$(subst -,_,$1))
+basename_flags = -D"KBUILD_BASENAME=KBUILD_STR($(call name-fix,$(*F)))"
+modname_flags = $(if $(filter 1,$(words $(modname))),\
+ -D"KBUILD_MODNAME=KBUILD_STR($(call name-fix,$(modname)))")
+
+_c_flags = $(CFLAGS) $(EXTRA_CFLAGS) $(CFLAGS_$(*F).o)
+_a_flags = $(AFLAGS) $(EXTRA_AFLAGS) $(AFLAGS_$(*F).o)
+_cpp_flags = $(CPPFLAGS) $(EXTRA_CPPFLAGS) $(CPPFLAGS_$(@F))
+
+# If building the kernel in a separate objtree expand all occurrences
+# of -Idir to -I$(srctree)/dir except for absolute paths (starting with '/').
+
+ifeq ($(KBUILD_SRC),)
+__c_flags = $(_c_flags)
+__a_flags = $(_a_flags)
+__cpp_flags = $(_cpp_flags)
+else
+
+# -I$(obj) locates generated .h files
+# $(call addtree,-I$(obj)) locates .h files in srctree, from generated .c files
+# and locates generated .h files
+# FIXME: Replace both with specific CFLAGS* statements in the makefiles
+__c_flags = $(call addtree,-I$(obj)) $(call flags,_c_flags)
+__a_flags = $(call flags,_a_flags)
+__cpp_flags = $(call flags,_cpp_flags)
+endif
+
+c_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(CPPFLAGS) \
+ $(__c_flags) $(modkern_cflags) \
+ -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags)
+
+a_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(CPPFLAGS) \
+ $(__a_flags) $(modkern_aflags)
+
+cpp_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(__cpp_flags)
+
+ld_flags = $(LDFLAGS) $(EXTRA_LDFLAGS)
+
+# Finds the multi-part object the current object will be linked into
+modname-multi = $(sort $(foreach m,$(multi-used),\
+ $(if $(filter $(subst $(obj)/,,$*.o), $($(m:.o=-objs)) $($(m:.o=-y))),$(m:.o=))))
+
+# Shipped files
+# ===========================================================================
+
+quiet_cmd_shipped = SHIPPED $@
+cmd_shipped = cat $< > $@
+
+$(obj)/%:: $(src)/%_shipped
+ $(call cmd,shipped)
+
+# Commands useful for building a boot image
+# ===========================================================================
+#
+# Use as follows:
+#
+# target: source(s) FORCE
+# $(if_changed,ld/objcopy/gzip)
+#
+# and add target to EXTRA_TARGETS so that we know we have to
+# read in the saved command line
+
+# Linking
+# ---------------------------------------------------------------------------
+
+quiet_cmd_ld = LD $@
+cmd_ld = $(LD) $(LDFLAGS) $(EXTRA_LDFLAGS) $(LDFLAGS_$(@F)) \
+ $(filter-out FORCE,$^) -o $@
+
+# Objcopy
+# ---------------------------------------------------------------------------
+
+quiet_cmd_objcopy = OBJCOPY $@
+cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@
+
+# Gzip
+# ---------------------------------------------------------------------------
+
+quiet_cmd_gzip = GZIP $@
+cmd_gzip = gzip -f -9 < $< > $@
+
+
--- /dev/null
+# ===========================================================================
+# Module versions
+# ===========================================================================
+#
+# Stage one of module building created the following:
+# a) The individual .o files used for the module
+# b) A <module>.o file which is the .o files above linked together
+# c) A <module>.mod file in $(MODVERDIR)/, listing the name of
+#    the preliminary <module>.o file, plus all .o files
+
+# Stage 2 is handled by this file and does the following
+# 1) Find all modules from the files listed in $(MODVERDIR)/
+# 2) modpost is then used to
+# 3)  create one <module>.mod.c file per module
+# 4) create one Module.symvers file with CRC for all exported symbols
+# 5) compile all <module>.mod.c files
+# 6) final link of the module to a <module>.ko file
+
+# Step 3 is used to place certain information in the module's ELF
+# section, including information such as:
+# Version magic (see include/vermagic.h for full details)
+# - Kernel release
+# - SMP is CONFIG_SMP
+# - PREEMPT is CONFIG_PREEMPT
+# - GCC Version
+# Module info
+# - Module version (MODULE_VERSION)
+# - Module aliases (MODULE_ALIAS)
+# - Module license (MODULE_LICENSE)
+# - See include/linux/module.h for more details
+
+# Step 4 is solely used to allow module versioning in external modules,
+# where the CRC of each module is retrieved from the Module.symvers file.
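+#
+# As a rough sketch for a hypothetical module foo (assuming foo.ko is listed
+# in $(MODVERDIR)/foo.mod), the stage 2 steps above boil down to:
+#   scripts/mod/modpost ... foo.o   ->  foo.mod.c (and Module.symvers)
+#   $(CC) -c foo.mod.c              ->  foo.mod.o
+#   $(LD) foo.o foo.mod.o           ->  foo.ko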
+
+PHONY := _modpost
+_modpost: __modpost
+
+include .config
+include scripts/Kbuild.include
+include scripts/Makefile.lib
+
+kernelsymfile := $(objtree)/Module.symvers
+modulesymfile := $(KBUILD_EXTMOD)/Modules.symvers
+
+# Step 1), find all modules listed in $(MODVERDIR)/
+__modules := $(sort $(shell grep -h '\.ko' /dev/null $(wildcard $(MODVERDIR)/*.mod)))
+modules := $(patsubst %.o,%.ko, $(wildcard $(__modules:.ko=.o)))
+
+_modpost: $(modules)
+
+
+# Step 2), invoke modpost
+# Includes step 3,4
+quiet_cmd_modpost = MODPOST
+ cmd_modpost = scripts/mod/modpost \
+ $(if $(CONFIG_MODVERSIONS),-m) \
+ $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,) \
+ $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
+ $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
+ $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
+ $(filter-out FORCE,$^)
+
+PHONY += __modpost
+__modpost: $(wildcard vmlinux) $(modules:.ko=.o) FORCE
+ $(call cmd,modpost)
+
+# Declare generated files as targets for modpost
+$(symverfile): __modpost ;
+$(modules:.ko=.mod.c): __modpost ;
+
+
+# Step 5), compile all *.mod.c files
+
+# modname is set to make c_flags define KBUILD_MODNAME
+modname = $(*F)
+
+quiet_cmd_cc_o_c = CC $@
+ cmd_cc_o_c = $(CC) $(c_flags) $(CFLAGS_MODULE) \
+ -c -o $@ $<
+
+$(modules:.ko=.mod.o): %.mod.o: %.mod.c FORCE
+ $(call if_changed_dep,cc_o_c)
+
+targets += $(modules:.ko=.mod.o)
+
+# Step 6), final link of the modules
+quiet_cmd_ld_ko_o = LD [M] $@
+ cmd_ld_ko_o = $(LD) $(LDFLAGS) $(LDFLAGS_MODULE) -o $@ \
+ $(filter-out FORCE,$^)
+
+$(modules): %.ko :%.o %.mod.o FORCE
+ $(call if_changed,ld_ko_o)
+
+targets += $(modules)
+
+
+# Add FORCE to the prerequisites of a target to force it to be always rebuilt.
+# ---------------------------------------------------------------------------
+
+PHONY += FORCE
+
+FORCE:
+
+# Read all saved command lines and dependencies for the $(targets) we
+# may be building above, using $(if_changed{,_dep}). As an
+# optimization, we don't need to read them if the target does not
+# exist; we will rebuild anyway in that case.
+
+targets := $(wildcard $(sort $(targets)))
+cmd_files := $(wildcard $(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd))
+
+ifneq ($(cmd_files),)
+ include $(cmd_files)
+endif
+
+
+# Declare the contents of the .PHONY variable as phony. We keep that
+# information in a variable so we can use it in if_changed and friends.
+
+.PHONY: $(PHONY)
--- /dev/null
+###
+# Makefile.basic lists the most basic programs used during the build process.
+# The programs listed herein are what is needed to do the basic stuff,
+# such as splitting .config and fixing dependency files.
+# This initial step is needed to avoid files being recompiled
+# whenever the kernel configuration changes (which is what happens when
+# .config is included by the main Makefile).
+# ---------------------------------------------------------------------------
+# fixdep: Used to generate dependency information during build process
+# split-include: Divide all config symbols up in a number of files in
+# include/config/...
+# docproc: Used in Documentation/docbook
+
+hostprogs-y := fixdep split-include docproc
+always := $(hostprogs-y)
+
+# fixdep is needed to compile other host programs
+$(addprefix $(obj)/,$(filter-out fixdep,$(always))): $(obj)/fixdep
--- /dev/null
+/*
+ * docproc is a simple preprocessor for the template files
+ * used as placeholders for the kernel internal documentation.
+ * docproc is used for documentation-frontend and
+ * dependency-generator.
+ * The two usages have in common that they require
+ * some knowledge of the .tmpl syntax, therefore they
+ * are kept together.
+ *
+ * documentation-frontend
+ * Scans the template file and calls kernel-doc for
+ * all occurrences of ![EIF]file
+ * Beforehand each referenced file is scanned for
+ * any exported symbol "EXPORT_SYMBOL()" statements.
+ * This is used to create proper -function and
+ * -nofunction arguments in calls to kernel-doc.
+ * Usage: docproc doc file.tmpl
+ *
+ * dependency-generator:
+ * Scans the template file and lists all files
+ * referenced in a format recognized by make.
+ * Usage: docproc depend file.tmpl
+ * Writes dependency information to stdout
+ * in the following format:
+ * file.tmpl src.c src2.c
+ * The filenames are obtained from the following constructs:
+ * !Efilename
+ * !Ifilename
+ * !Dfilename
+ * !Ffilename
+ *
+ */
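+
+/*
+ * Rough example of both modes (the file names are purely illustrative):
+ * for a template line "!Edrivers/foo/bar.c" the doc frontend first records
+ * the EXPORT_SYMBOL()s of drivers/foo/bar.c and then invokes something like
+ *   scripts/kernel-doc -docbook -function <exported sym> ... drivers/foo/bar.c
+ * whereas "docproc depend file.tmpl" just prints the template name followed
+ * by the referenced files, tab separated, for make to consume.
+ */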
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <unistd.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+/* exitstatus is used to keep track of any failing calls to kernel-doc,
+ * but execution continues. */
+int exitstatus = 0;
+
+typedef void DFL(char *);
+DFL *defaultline;
+
+typedef void FILEONLY(char * file);
+FILEONLY *internalfunctions;
+FILEONLY *externalfunctions;
+FILEONLY *symbolsonly;
+
+typedef void FILELINE(char * file, char * line);
+FILELINE * singlefunctions;
+FILELINE * entity_system;
+
+#define MAXLINESZ 2048
+#define MAXFILES 250
+#define KERNELDOCPATH "scripts/"
+#define KERNELDOC "kernel-doc"
+#define DOCBOOK "-docbook"
+#define FUNCTION "-function"
+#define NOFUNCTION "-nofunction"
+
+void usage (void)
+{
+ fprintf(stderr, "Usage: docproc {doc|depend} file\n");
+ fprintf(stderr, "Input is read from file.tmpl. Output is sent to stdout\n");
+ fprintf(stderr, "doc: frontend when generating kernel documentation\n");
+ fprintf(stderr, "depend: generate list of files referenced within file\n");
+}
+
+/*
+ * Execute kernel-doc with parameters given in svec
+ */
+void exec_kernel_doc(char **svec)
+{
+ pid_t pid;
+ int ret;
+ char real_filename[PATH_MAX + 1];
+	/* Make sure output generated so far is flushed */
+ fflush(stdout);
+ switch(pid=fork()) {
+ case -1:
+ perror("fork");
+ exit(1);
+ case 0:
+ memset(real_filename, 0, sizeof(real_filename));
+ strncat(real_filename, getenv("SRCTREE"), PATH_MAX);
+ strncat(real_filename, KERNELDOCPATH KERNELDOC,
+ PATH_MAX - strlen(real_filename));
+ execvp(real_filename, svec);
+ fprintf(stderr, "exec ");
+ perror(real_filename);
+ exit(1);
+ default:
+ waitpid(pid, &ret ,0);
+ }
+ if (WIFEXITED(ret))
+ exitstatus |= WEXITSTATUS(ret);
+ else
+ exitstatus = 0xff;
+}
+
+/* Types used to create list of all exported symbols in a number of files */
+struct symbols
+{
+ char *name;
+};
+
+struct symfile
+{
+ char *filename;
+ struct symbols *symbollist;
+ int symbolcnt;
+};
+
+struct symfile symfilelist[MAXFILES];
+int symfilecnt = 0;
+
+void add_new_symbol(struct symfile *sym, char * symname)
+{
+ sym->symbollist =
+ realloc(sym->symbollist, (sym->symbolcnt + 1) * sizeof(char *));
+ sym->symbollist[sym->symbolcnt++].name = strdup(symname);
+}
+
+/* Add a filename to the list */
+struct symfile * add_new_file(char * filename)
+{
+ symfilelist[symfilecnt++].filename = strdup(filename);
+ return &symfilelist[symfilecnt - 1];
+}
+/* Check if file is already present in the list */
+struct symfile * filename_exist(char * filename)
+{
+ int i;
+ for (i=0; i < symfilecnt; i++)
+ if (strcmp(symfilelist[i].filename, filename) == 0)
+ return &symfilelist[i];
+ return NULL;
+}
+
+/*
+ * List all files referenced within the template file.
+ * Files are separated by tabs.
+ */
+void adddep(char * file) { printf("\t%s", file); }
+void adddep2(char * file, char * line) { line = line; adddep(file); }
+void noaction(char * line) { line = line; }
+void noaction2(char * file, char * line) { file = file; line = line; }
+
+/* Echo the line without further action */
+void printline(char * line) { printf("%s", line); }
+
+/*
+ * Find all symbols exported with EXPORT_SYMBOL and EXPORT_SYMBOL_GPL
+ * in filename.
+ * All symbols located are stored in symfilelist.
+ */
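+/* e.g. a source line "EXPORT_SYMBOL(foo_init);" (hypothetical symbol) adds
+ * "foo_init" to that file's entry in symfilelist. */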
+void find_export_symbols(char * filename)
+{
+ FILE * fp;
+ struct symfile *sym;
+ char line[MAXLINESZ];
+ if (filename_exist(filename) == NULL) {
+ char real_filename[PATH_MAX + 1];
+ memset(real_filename, 0, sizeof(real_filename));
+ strncat(real_filename, getenv("SRCTREE"), PATH_MAX);
+ strncat(real_filename, filename,
+ PATH_MAX - strlen(real_filename));
+ sym = add_new_file(filename);
+ fp = fopen(real_filename, "r");
+ if (fp == NULL)
+ {
+ fprintf(stderr, "docproc: ");
+ perror(real_filename);
+ }
+ while(fgets(line, MAXLINESZ, fp)) {
+ char *p;
+ char *e;
+ if (((p = strstr(line, "EXPORT_SYMBOL_GPL")) != 0) ||
+ ((p = strstr(line, "EXPORT_SYMBOL")) != 0)) {
+ /* Skip EXPORT_SYMBOL{_GPL} */
+ while (isalnum(*p) || *p == '_')
+ p++;
+			/* Remove parentheses and additional whitespace */
+ while (isspace(*p))
+ p++;
+ if (*p != '(')
+ continue; /* Syntax error? */
+ else
+ p++;
+ while (isspace(*p))
+ p++;
+ e = p;
+ while (isalnum(*e) || *e == '_')
+ e++;
+ *e = '\0';
+ add_new_symbol(sym, p);
+ }
+ }
+ fclose(fp);
+ }
+}
+
+/*
+ * Document all external or internal functions in a file.
+ * Call kernel-doc with the following parameters:
+ * kernel-doc -docbook -nofunction function_name1 filename
+ * function names are obtained from all the src files
+ * by find_export_symbols.
+ * intfunc uses -nofunction
+ * extfunc uses -function
+ */
+void docfunctions(char * filename, char * type)
+{
+ int i,j;
+ int symcnt = 0;
+ int idx = 0;
+ char **vec;
+
+	for (i=0; i < symfilecnt; i++)
+ symcnt += symfilelist[i].symbolcnt;
+ vec = malloc((2 + 2 * symcnt + 2) * sizeof(char*));
+ if (vec == NULL) {
+ perror("docproc: ");
+ exit(1);
+ }
+ vec[idx++] = KERNELDOC;
+ vec[idx++] = DOCBOOK;
+ for (i=0; i < symfilecnt; i++) {
+ struct symfile * sym = &symfilelist[i];
+ for (j=0; j < sym->symbolcnt; j++) {
+ vec[idx++] = type;
+ vec[idx++] = sym->symbollist[j].name;
+ }
+ }
+ vec[idx++] = filename;
+ vec[idx] = NULL;
+ printf("<!-- %s -->\n", filename);
+ exec_kernel_doc(vec);
+ fflush(stdout);
+ free(vec);
+}
+void intfunc(char * filename) { docfunctions(filename, NOFUNCTION); }
+void extfunc(char * filename) { docfunctions(filename, FUNCTION); }
+
+/*
+ * Document specific function(s) in a file.
+ * Call kernel-doc with the following parameters:
+ * kernel-doc -docbook -function function1 [-function function2]
+ */
+void singfunc(char * filename, char * line)
+{
+ char *vec[200]; /* Enough for specific functions */
+ int i, idx = 0;
+ int startofsym = 1;
+ vec[idx++] = KERNELDOC;
+ vec[idx++] = DOCBOOK;
+
+	/* Split line up in individual parameters preceded by FUNCTION */
+ for (i=0; line[i]; i++) {
+ if (isspace(line[i])) {
+ line[i] = '\0';
+ startofsym = 1;
+ continue;
+ }
+ if (startofsym) {
+ startofsym = 0;
+ vec[idx++] = FUNCTION;
+ vec[idx++] = &line[i];
+ }
+ }
+ vec[idx++] = filename;
+ vec[idx] = NULL;
+ exec_kernel_doc(vec);
+}
+
+/*
+ * Parse file, calling action specific functions for:
+ * 1) Lines containing !E
+ * 2) Lines containing !I
+ * 3) Lines containing !D
+ * 4) Lines containing !F
+ * 5) Default lines - lines not matching the above
+ */
+void parse_file(FILE *infile)
+{
+ char line[MAXLINESZ];
+ char * s;
+ while(fgets(line, MAXLINESZ, infile)) {
+ if (line[0] == '!') {
+ s = line + 2;
+ switch (line[1]) {
+ case 'E':
+ while (*s && !isspace(*s)) s++;
+ *s = '\0';
+ externalfunctions(line+2);
+ break;
+ case 'I':
+ while (*s && !isspace(*s)) s++;
+ *s = '\0';
+ internalfunctions(line+2);
+ break;
+ case 'D':
+ while (*s && !isspace(*s)) s++;
+ *s = '\0';
+ symbolsonly(line+2);
+ break;
+ case 'F':
+ /* filename */
+ while (*s && !isspace(*s)) s++;
+ *s++ = '\0';
+ /* function names */
+ while (isspace(*s))
+ s++;
+ singlefunctions(line +2, s);
+ break;
+ default:
+ defaultline(line);
+ }
+ }
+ else {
+ defaultline(line);
+ }
+ }
+ fflush(stdout);
+}
+
+
+int main(int argc, char *argv[])
+{
+ FILE * infile;
+ if (argc != 3) {
+ usage();
+ exit(1);
+ }
+ /* Open file, exit on error */
+ infile = fopen(argv[2], "r");
+ if (infile == NULL) {
+ fprintf(stderr, "docproc: ");
+ perror(argv[2]);
+ exit(2);
+ }
+
+ if (strcmp("doc", argv[1]) == 0)
+ {
+ /* Need to do this in two passes.
+ * First pass is used to collect all symbols exported
+ * in the various files.
+ * Second pass generate the documentation.
+		 * This is required because functions are declared
+ * and exported in different files :-((
+ */
+ /* Collect symbols */
+ defaultline = noaction;
+ internalfunctions = find_export_symbols;
+ externalfunctions = find_export_symbols;
+ symbolsonly = find_export_symbols;
+ singlefunctions = noaction2;
+ parse_file(infile);
+
+ /* Rewind to start from beginning of file again */
+ fseek(infile, 0, SEEK_SET);
+ defaultline = printline;
+ internalfunctions = intfunc;
+ externalfunctions = extfunc;
+ symbolsonly = printline;
+ singlefunctions = singfunc;
+
+ parse_file(infile);
+ }
+ else if (strcmp("depend", argv[1]) == 0)
+ {
+ /* Create first part of dependency chain
+ * file.tmpl */
+ printf("%s\t", argv[2]);
+ defaultline = noaction;
+ internalfunctions = adddep;
+ externalfunctions = adddep;
+ symbolsonly = adddep;
+ singlefunctions = adddep2;
+ parse_file(infile);
+ printf("\n");
+ }
+ else
+ {
+ fprintf(stderr, "Unknown option: %s\n", argv[1]);
+ exit(1);
+ }
+ fclose(infile);
+ fflush(stdout);
+ return exitstatus;
+}
+
--- /dev/null
+/*
+ * "Optimize" a list of dependencies as spit out by gcc -MD
+ * for the kernel build
+ * ===========================================================================
+ *
+ * Author Kai Germaschewski
+ * Copyright 2002 by Kai Germaschewski <kai.germaschewski@gmx.de>
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ *
+ * Introduction:
+ *
+ * gcc produces a very nice and correct list of dependencies which
+ * tells make when to remake a file.
+ *
+ * To use this list as-is however has the drawback that virtually
+ * every file in the kernel includes <linux/config.h> which then again
+ * includes <linux/autoconf.h>
+ *
+ * If the user re-runs make *config, linux/autoconf.h will be
+ * regenerated. make notices that and will rebuild every file which
+ * includes autoconf.h, i.e. basically all files. This is extremely
+ * annoying if the user just changed CONFIG_HIS_DRIVER from n to m.
+ *
+ * So we play the same trick that "mkdep" played before. We replace
+ * the dependency on linux/autoconf.h by a dependency on every config
+ * option which is mentioned in any of the listed prerequisites.
+ *
+ * To be exact, split-include populates a tree in include/config/,
+ * e.g. include/config/his/driver.h, which contains the #define/#undef
+ * for the CONFIG_HIS_DRIVER option.
+ *
+ * So if the user changes his CONFIG_HIS_DRIVER option, only the objects
+ * which depend on "include/linux/config/his/driver.h" will be rebuilt,
+ * so most likely only his driver ;-)
+ *
+ * The idea above dates, by the way, back to Michael E Chastain, AFAIK.
+ *
+ * So to get dependencies right, there are two issues:
+ * o if any of the files the compiler read changed, we need to rebuild
+ * o if the command line given to compile the file changed, we
+ *   had better rebuild as well.
+ *
+ * The former is handled by using the -MD output, the later by saving
+ * the command line used to compile the old object and comparing it
+ * to the one we would now use.
+ *
+ * Again, this idea is pretty old and has been discussed on
+ * kbuild-devel a long time ago. I don't have a sensibly working
+ * internet connection right now, so I rather don't mention names
+ * without double checking.
+ *
+ * This code is partially based on mkdep.c, which
+ * says the following about its history:
+ *
+ * Copyright abandoned, Michael Chastain, <mailto:mec@shout.net>.
+ * This is a C version of syncdep.pl by Werner Almesberger.
+ *
+ *
+ * It is invoked as
+ *
+ * fixdep <depfile> <target> <cmdline>
+ *
+ * and will read the dependency file <depfile>
+ *
+ * The transformed dependency snippet is written to stdout.
+ *
+ * It first generates a line
+ *
+ * cmd_<target> = <cmdline>
+ *
+ * and then basically copies the .<target>.d file to stdout, in the
+ * process filtering out the dependency on linux/autoconf.h and adding
+ * dependencies on include/config/my/option.h for every
+ * CONFIG_MY_OPTION encountered in any of the prerequisites.
+ *
+ * It will also filter out all the dependencies on *.ver. We need
+ * to make sure that the generated version checksums are globally up
+ * to date before even starting the recursive build, so it's too late
+ * at this point anyway.
+ *
+ * The algorithm to grep for "CONFIG_..." is a bit unusual, but should
+ * be fast ;-) We don't even try to really parse the header files, but
+ * merely grep, i.e. if CONFIG_FOO is mentioned in a comment, it will
+ * be picked up as well. It's not a problem with respect to
+ * correctness, since that can only give too many dependencies, thus
+ * we cannot miss a rebuild. Since people tend to not mention totally
+ * unrelated CONFIG_ options all over the place, it's not an
+ * efficiency problem either.
+ *
+ * (Note: it'd be easy to port over the complete mkdep state machine,
+ * but I don't think the added complexity is worth it)
+ */
+/*
+ * Note 2: if somebody writes HELLO_CONFIG_BOOM in a file, it will depend on
+ * CONFIG_BOOM. This could seem a bug (not too hard to fix), but please do not
+ * fix it! Some UserModeLinux files (look at arch/um/) call CONFIG_BOOM as
+ * UML_CONFIG_BOOM, to avoid conflicts with /usr/include/linux/autoconf.h,
+ * through arch/um/include/uml-config.h; this fixdep "bug" makes sure that
+ * those files will have correct dependencies.
+ */
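+
+/*
+ * Minimal sketch of the resulting output (paths and command line are made
+ * up for illustration): given a depfile
+ *   foo.o: foo.c include/linux/autoconf.h include/linux/sched.h
+ * with CONFIG_SMP mentioned in one of those prerequisites, running
+ *   fixdep .foo.o.d foo.o "gcc -c -o foo.o foo.c"
+ * prints roughly
+ *   cmd_foo.o := gcc -c -o foo.o foo.c
+ *   deps_foo.o := \
+ *     foo.c \
+ *     include/linux/sched.h \
+ *     $(wildcard include/config/smp.h) \
+ *   foo.o: $(deps_foo.o)
+ *   $(deps_foo.o):
+ * i.e. the autoconf.h dependency is dropped and replaced by a per-option
+ * wildcard dependency.
+ */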
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <limits.h>
+#include <ctype.h>
+#include <arpa/inet.h>
+
+#define INT_CONF ntohl(0x434f4e46)
+#define INT_ONFI ntohl(0x4f4e4649)
+#define INT_NFIG ntohl(0x4e464947)
+#define INT_FIG_ ntohl(0x4649475f)
+
+char *target;
+char *depfile;
+char *cmdline;
+
+void usage(void)
+
+{
+ fprintf(stderr, "Usage: fixdep <depfile> <target> <cmdline>\n");
+ exit(1);
+}
+
+/*
+ * Print out the commandline prefixed with cmd_<target filename> :=
+ */
+void print_cmdline(void)
+{
+ printf("cmd_%s := %s\n\n", target, cmdline);
+}
+
+char * str_config = NULL;
+int size_config = 0;
+int len_config = 0;
+
+/*
+ * Grow the configuration string to a desired length.
+ * Usually the first growth is plenty.
+ */
+void grow_config(int len)
+{
+ while (len_config + len > size_config) {
+ if (size_config == 0)
+ size_config = 2048;
+ str_config = realloc(str_config, size_config *= 2);
+ if (str_config == NULL)
+ { perror("fixdep:malloc"); exit(1); }
+ }
+}
+
+
+
+/*
+ * Lookup a value in the configuration string.
+ */
+int is_defined_config(const char * name, int len)
+{
+ const char * pconfig;
+ const char * plast = str_config + len_config - len;
+ for ( pconfig = str_config + 1; pconfig < plast; pconfig++ ) {
+ if (pconfig[ -1] == '\n'
+ && pconfig[len] == '\n'
+ && !memcmp(pconfig, name, len))
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Add a new value to the configuration string.
+ */
+void define_config(const char * name, int len)
+{
+ grow_config(len + 1);
+
+ memcpy(str_config+len_config, name, len);
+ len_config += len;
+ str_config[len_config++] = '\n';
+}
+
+/*
+ * Clear the set of configuration strings.
+ */
+void clear_config(void)
+{
+ len_config = 0;
+ define_config("", 0);
+}
+
+/*
+ * Record the use of a CONFIG_* word.
+ */
+void use_config(char *m, int slen)
+{
+ char s[PATH_MAX];
+ char *p;
+
+ if (is_defined_config(m, slen))
+ return;
+
+ define_config(m, slen);
+
+ memcpy(s, m, slen); s[slen] = 0;
+
+ for (p = s; p < s + slen; p++) {
+ if (*p == '_')
+ *p = '/';
+ else
+ *p = tolower((int)*p);
+ }
+ printf(" $(wildcard include/config/%s.h) \\\n", s);
+}
+
+void parse_config_file(char *map, size_t len)
+{
+ int *end = (int *) (map + len);
+ /* start at +1, so that p can never be < map */
+ int *m = (int *) map + 1;
+ char *p, *q;
+
+ for (; m < end; m++) {
+ if (*m == INT_CONF) { p = (char *) m ; goto conf; }
+ if (*m == INT_ONFI) { p = (char *) m-1; goto conf; }
+ if (*m == INT_NFIG) { p = (char *) m-2; goto conf; }
+ if (*m == INT_FIG_) { p = (char *) m-3; goto conf; }
+ continue;
+ conf:
+ if (p > map + len - 7)
+ continue;
+ if (memcmp(p, "CONFIG_", 7))
+ continue;
+ for (q = p + 7; q < map + len; q++) {
+ if (!(isalnum(*q) || *q == '_'))
+ goto found;
+ }
+ continue;
+
+ found:
+ use_config(p+7, q-p-7);
+ }
+}
+
+/* test if s ends in sub */
+int strrcmp(char *s, char *sub)
+{
+ int slen = strlen(s);
+ int sublen = strlen(sub);
+
+ if (sublen > slen)
+ return 1;
+
+ return memcmp(s + slen - sublen, sub, sublen);
+}
+
+void do_config_file(char *filename)
+{
+ struct stat st;
+ int fd;
+ void *map;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0) {
+ fprintf(stderr, "fixdep: ");
+ perror(filename);
+ exit(2);
+ }
+ fstat(fd, &st);
+ if (st.st_size == 0) {
+ close(fd);
+ return;
+ }
+ map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ if ((long) map == -1) {
+ perror("fixdep: mmap");
+ close(fd);
+ return;
+ }
+
+ parse_config_file(map, st.st_size);
+
+ munmap(map, st.st_size);
+
+ close(fd);
+}
+
+void parse_dep_file(void *map, size_t len)
+{
+ char *m = map;
+ char *end = m + len;
+ char *p;
+ char s[PATH_MAX];
+
+ p = strchr(m, ':');
+ if (!p) {
+ fprintf(stderr, "fixdep: parse error\n");
+ exit(1);
+ }
+ memcpy(s, m, p-m); s[p-m] = 0;
+ printf("deps_%s := \\\n", target);
+ m = p+1;
+
+ clear_config();
+
+ while (m < end) {
+ while (m < end && (*m == ' ' || *m == '\\' || *m == '\n'))
+ m++;
+ p = m;
+ while (p < end && *p != ' ') p++;
+ if (p == end) {
+ do p--; while (!isalnum(*p));
+ p++;
+ }
+ memcpy(s, m, p-m); s[p-m] = 0;
+ if (strrcmp(s, "include/linux/autoconf.h") &&
+ strrcmp(s, "arch/um/include/uml-config.h") &&
+ strrcmp(s, ".ver")) {
+ printf(" %s \\\n", s);
+ do_config_file(s);
+ }
+ m = p + 1;
+ }
+ printf("\n%s: $(deps_%s)\n\n", target, target);
+ printf("$(deps_%s):\n", target);
+}
+
+void print_deps(void)
+{
+ struct stat st;
+ int fd;
+ void *map;
+
+ fd = open(depfile, O_RDONLY);
+ if (fd < 0) {
+ fprintf(stderr, "fixdep: ");
+ perror(depfile);
+ exit(2);
+ }
+ fstat(fd, &st);
+ if (st.st_size == 0) {
+ fprintf(stderr,"fixdep: %s is empty\n",depfile);
+ close(fd);
+ return;
+ }
+ map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ if ((long) map == -1) {
+ perror("fixdep: mmap");
+ close(fd);
+ return;
+ }
+
+ parse_dep_file(map, st.st_size);
+
+ munmap(map, st.st_size);
+
+ close(fd);
+}
+
+void traps(void)
+{
+ static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
+
+ if (*(int *)test != INT_CONF) {
+		fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianness? %#x\n",
+ *(int *)test);
+ exit(2);
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ traps();
+
+ if (argc != 4)
+ usage();
+
+ depfile = argv[1];
+ target = argv[2];
+ cmdline = argv[3];
+
+ print_cmdline();
+ print_deps();
+
+ return 0;
+}
--- /dev/null
+/*
+ * split-include.c
+ *
+ * Copyright abandoned, Michael Chastain, <mailto:mec@shout.net>.
+ * This is a C version of syncdep.pl by Werner Almesberger.
+ *
+ * This program takes autoconf.h as input and outputs a directory full
+ * of one-line include files, merging onto the old values.
+ *
+ * Think of the configuration options as key-value pairs. Then there
+ * are five cases:
+ *
+ * key old value new value action
+ *
+ * KEY-1 VALUE-1 VALUE-1 leave file alone
+ * KEY-2 VALUE-2A VALUE-2B write VALUE-2B into file
+ * KEY-3 - VALUE-3 write VALUE-3 into file
+ * KEY-4 VALUE-4 - write an empty file
+ * KEY-5 (empty) - leave old empty file alone
+ */
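+
+/*
+ * Illustrative example (the option name is made up): a line in autoconf.h
+ * such as "#define CONFIG_HIS_DRIVER 1" ends up as the one-line file
+ * include/config/his/driver.h, and that file is only rewritten when the
+ * line actually changes, so only objects depending on it get rebuilt.
+ */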
+
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#define ERROR_EXIT(strExit) \
+ { \
+ const int errnoSave = errno; \
+ fprintf(stderr, "%s: ", str_my_name); \
+ errno = errnoSave; \
+ perror((strExit)); \
+ exit(1); \
+ }
+
+
+
+int main(int argc, const char * argv [])
+{
+ const char * str_my_name;
+ const char * str_file_autoconf;
+ const char * str_dir_config;
+
+ FILE * fp_config;
+ FILE * fp_target;
+ FILE * fp_find;
+
+ int buffer_size;
+
+ char * line;
+ char * old_line;
+ char * list_target;
+ char * ptarget;
+
+ struct stat stat_buf;
+
+ /* Check arg count. */
+ if (argc != 3)
+ {
+ fprintf(stderr, "%s: wrong number of arguments.\n", argv[0]);
+ exit(1);
+ }
+
+ str_my_name = argv[0];
+ str_file_autoconf = argv[1];
+ str_dir_config = argv[2];
+
+ /* Find a buffer size. */
+ if (stat(str_file_autoconf, &stat_buf) != 0)
+ ERROR_EXIT(str_file_autoconf);
+ buffer_size = 2 * stat_buf.st_size + 4096;
+
+ /* Allocate buffers. */
+ if ( (line = malloc(buffer_size)) == NULL
+ || (old_line = malloc(buffer_size)) == NULL
+ || (list_target = malloc(buffer_size)) == NULL )
+ ERROR_EXIT(str_file_autoconf);
+
+ /* Open autoconfig file. */
+ if ((fp_config = fopen(str_file_autoconf, "r")) == NULL)
+ ERROR_EXIT(str_file_autoconf);
+
+ /* Make output directory if needed. */
+ if (stat(str_dir_config, &stat_buf) != 0)
+ {
+ if (mkdir(str_dir_config, 0755) != 0)
+ ERROR_EXIT(str_dir_config);
+ }
+
+ /* Change to output directory. */
+ if (chdir(str_dir_config) != 0)
+ ERROR_EXIT(str_dir_config);
+
+ /* Put initial separator into target list. */
+ ptarget = list_target;
+ *ptarget++ = '\n';
+
+ /* Read config lines. */
+ while (fgets(line, buffer_size, fp_config))
+ {
+ const char * str_config;
+ int is_same;
+ int itarget;
+
+ if (line[0] != '#')
+ continue;
+ if ((str_config = strstr(line, "CONFIG_")) == NULL)
+ continue;
+
+ /* Make the output file name. */
+ str_config += sizeof("CONFIG_") - 1;
+ for (itarget = 0; !isspace(str_config[itarget]); itarget++)
+ {
+ int c = (unsigned char) str_config[itarget];
+ if (isupper(c)) c = tolower(c);
+ if (c == '_') c = '/';
+ ptarget[itarget] = c;
+ }
+ ptarget[itarget++] = '.';
+ ptarget[itarget++] = 'h';
+ ptarget[itarget++] = '\0';
+
+ /* Check for existing file. */
+ is_same = 0;
+ if ((fp_target = fopen(ptarget, "r")) != NULL)
+ {
+ fgets(old_line, buffer_size, fp_target);
+ if (fclose(fp_target) != 0)
+ ERROR_EXIT(ptarget);
+ if (!strcmp(line, old_line))
+ is_same = 1;
+ }
+
+ if (!is_same)
+ {
+ /* Auto-create directories. */
+ int islash;
+ for (islash = 0; islash < itarget; islash++)
+ {
+ if (ptarget[islash] == '/')
+ {
+ ptarget[islash] = '\0';
+ if (stat(ptarget, &stat_buf) != 0
+ && mkdir(ptarget, 0755) != 0)
+ ERROR_EXIT( ptarget );
+ ptarget[islash] = '/';
+ }
+ }
+
+ /* Write the file. */
+ if ((fp_target = fopen(ptarget, "w" )) == NULL)
+ ERROR_EXIT(ptarget);
+ fputs(line, fp_target);
+ if (ferror(fp_target) || fclose(fp_target) != 0)
+ ERROR_EXIT(ptarget);
+ }
+
+ /* Update target list */
+ ptarget += itarget;
+ *(ptarget-1) = '\n';
+ }
+
+ /*
+ * Close autoconfig file.
+ * Terminate the target list.
+ */
+ if (fclose(fp_config) != 0)
+ ERROR_EXIT(str_file_autoconf);
+ *ptarget = '\0';
+
+ /*
+ * Fix up existing files which have no new value.
+ * This is Case 4 and Case 5.
+ *
+ * I re-read the tree and filter it against list_target.
+ * This is crude. But it avoids data copies. Also, list_target
+ * is compact and contiguous, so it easily fits into cache.
+ *
+ * Notice that list_target contains strings separated by \n,
+ * with a \n before the first string and after the last.
+ * fgets gives the incoming names a terminating \n.
+ * So by having an initial \n, strstr will find exact matches.
+ */
+
+ fp_find = popen("find * -type f -name \"*.h\" -print", "r");
+ if (fp_find == 0)
+ ERROR_EXIT( "find" );
+
+ line[0] = '\n';
+ while (fgets(line+1, buffer_size, fp_find))
+ {
+ if (strstr(list_target, line) == NULL)
+ {
+ /*
+ * This is an old file with no CONFIG_* flag in autoconf.h.
+ */
+
+ /* First strip the \n. */
+ line[strlen(line)-1] = '\0';
+
+ /* Grab size. */
+ if (stat(line+1, &stat_buf) != 0)
+ ERROR_EXIT(line);
+
+ /* If file is not empty, make it empty and give it a fresh date. */
+ if (stat_buf.st_size != 0)
+ {
+ if ((fp_target = fopen(line+1, "w")) == NULL)
+ ERROR_EXIT(line);
+ if (fclose(fp_target) != 0)
+ ERROR_EXIT(line);
+ }
+ }
+ }
+
+ if (pclose(fp_find) != 0)
+ ERROR_EXIT("find");
+
+ return 0;
+}
--- /dev/null
+/* Generate assembler source containing symbol information
+ *
+ * Copyright 2002 by Kai Germaschewski
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * Usage: nm -n vmlinux | scripts/kallsyms [--all-symbols] > symbols.S
+ *
+ * ChangeLog:
+ *
+ * (25/Aug/2004) Paulo Marques <pmarques@grupopie.com>
+ * Changed the compression method from stem compression to "table lookup"
+ * compression
+ *
+ * Table compression uses all the unused char codes on the symbols and
+ * maps these to the most used substrings (tokens). For instance, it might
+ * map char code 0xF7 to represent "write_" and then in every symbol where
+ * "write_" appears it can be replaced by 0xF7, saving 5 bytes.
+ * The used codes themselves are also placed in the table so that the
+ * decompression can work without "special cases".
+ * Applied to kernel symbols, this usually produces a compression ratio
+ * of about 50%.
+ *
+ */
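+
+/*
+ * Purely illustrative sketch (the token and char code are made up): if the
+ * byte pair "t_" turns out to be the most profitable token, an unused char
+ * code such as 0x80 is assigned to it, every occurrence of "t_" in the
+ * length-prefixed entries of kallsyms_names is replaced by 0x80, and
+ * kallsyms_token_table / kallsyms_token_index record that 0x80 expands
+ * back to "t_".
+ */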
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+
+#define KSYM_NAME_LEN 127
+
+
+struct sym_entry {
+ unsigned long long addr;
+ unsigned int len;
+ unsigned char *sym;
+};
+
+
+static struct sym_entry *table;
+static unsigned int table_size, table_cnt;
+static unsigned long long _stext, _etext, _sinittext, _einittext, _sextratext, _eextratext;
+static int all_symbols = 0;
+static char symbol_prefix_char = '\0';
+
+int token_profit[0x10000];
+
+/* the table that holds the result of the compression */
+unsigned char best_table[256][2];
+unsigned char best_table_len[256];
+
+
+static void usage(void)
+{
+ fprintf(stderr, "Usage: kallsyms [--all-symbols] [--symbol-prefix=<prefix char>] < in.map > out.S\n");
+ exit(1);
+}
+
+/*
+ * This ignores the intensely annoying "mapping symbols" found
+ * in ARM ELF files: $a, $t and $d.
+ */
+static inline int is_arm_mapping_symbol(const char *str)
+{
+ return str[0] == '$' && strchr("atd", str[1])
+ && (str[2] == '\0' || str[2] == '.');
+}
+
+static int read_symbol(FILE *in, struct sym_entry *s)
+{
+ char str[500];
+ char *sym, stype;
+ int rc;
+
+ rc = fscanf(in, "%llx %c %499s\n", &s->addr, &stype, str);
+ if (rc != 3) {
+ if (rc != EOF) {
+ /* skip line */
+ fgets(str, 500, in);
+ }
+ return -1;
+ }
+
+ sym = str;
+ /* skip prefix char */
+ if (symbol_prefix_char && str[0] == symbol_prefix_char)
+ sym++;
+
+ /* Ignore most absolute/undefined (?) symbols. */
+ if (strcmp(sym, "_stext") == 0)
+ _stext = s->addr;
+ else if (strcmp(sym, "_etext") == 0)
+ _etext = s->addr;
+ else if (strcmp(sym, "_sinittext") == 0)
+ _sinittext = s->addr;
+ else if (strcmp(sym, "_einittext") == 0)
+ _einittext = s->addr;
+ else if (strcmp(sym, "_sextratext") == 0)
+ _sextratext = s->addr;
+ else if (strcmp(sym, "_eextratext") == 0)
+ _eextratext = s->addr;
+ else if (toupper(stype) == 'A')
+ {
+ /* Keep these useful absolute symbols */
+ if (strcmp(sym, "__kernel_syscall_via_break") &&
+ strcmp(sym, "__kernel_syscall_via_epc") &&
+ strcmp(sym, "__kernel_sigtramp") &&
+ strcmp(sym, "__gp"))
+ return -1;
+
+ }
+ else if (toupper(stype) == 'U' ||
+ is_arm_mapping_symbol(sym))
+ return -1;
+ /* exclude also MIPS ELF local symbols ($L123 instead of .L123) */
+ else if (str[0] == '$')
+ return -1;
+
+ /* include the type field in the symbol name, so that it gets
+ * compressed together */
+ s->len = strlen(str) + 1;
+ s->sym = malloc(s->len + 1);
+ if (!s->sym) {
+ fprintf(stderr, "kallsyms failure: "
+ "unable to allocate required amount of memory\n");
+ exit(EXIT_FAILURE);
+ }
+ strcpy((char *)s->sym + 1, str);
+ s->sym[0] = stype;
+
+ return 0;
+}
+
+static int symbol_valid(struct sym_entry *s)
+{
+ /* Symbols which vary between passes. Passes 1 and 2 must have
+ * identical symbol lists. The kallsyms_* symbols below are only added
+	 * after pass 1; they would be included in pass 2 when --all-symbols is
+	 * specified, so exclude them to get a stable symbol list.
+ */
+ static char *special_symbols[] = {
+ "kallsyms_addresses",
+ "kallsyms_num_syms",
+ "kallsyms_names",
+ "kallsyms_markers",
+ "kallsyms_token_table",
+ "kallsyms_token_index",
+
+ /* Exclude linker generated symbols which vary between passes */
+ "_SDA_BASE_", /* ppc */
+ "_SDA2_BASE_", /* ppc */
+ NULL };
+ int i;
+ int offset = 1;
+
+ /* skip prefix char */
+ if (symbol_prefix_char && *(s->sym + 1) == symbol_prefix_char)
+ offset++;
+
+ /* if --all-symbols is not specified, then symbols outside the text
+ * and inittext sections are discarded */
+ if (!all_symbols) {
+ if ((s->addr < _stext || s->addr > _etext)
+ && (s->addr < _sinittext || s->addr > _einittext)
+ && (s->addr < _sextratext || s->addr > _eextratext))
+ return 0;
+ /* Corner case. Discard any symbols with the same value as
+ * _etext _einittext or _eextratext; they can move between pass
+ * 1 and 2 when the kallsyms data are added. If these symbols
+ * move then they may get dropped in pass 2, which breaks the
+ * kallsyms rules.
+ */
+ if ((s->addr == _etext && strcmp((char*)s->sym + offset, "_etext")) ||
+ (s->addr == _einittext && strcmp((char*)s->sym + offset, "_einittext")) ||
+ (s->addr == _eextratext && strcmp((char*)s->sym + offset, "_eextratext")))
+ return 0;
+ }
+
+ /* Exclude symbols which vary between passes. */
+ if (strstr((char *)s->sym + offset, "_compiled."))
+ return 0;
+
+ for (i = 0; special_symbols[i]; i++)
+ if( strcmp((char *)s->sym + offset, special_symbols[i]) == 0 )
+ return 0;
+
+ return 1;
+}
+
+static void read_map(FILE *in)
+{
+ while (!feof(in)) {
+ if (table_cnt >= table_size) {
+ table_size += 10000;
+ table = realloc(table, sizeof(*table) * table_size);
+ if (!table) {
+ fprintf(stderr, "out of memory\n");
+ exit (1);
+ }
+ }
+ if (read_symbol(in, &table[table_cnt]) == 0)
+ table_cnt++;
+ }
+}
+
+static void output_label(char *label)
+{
+ if (symbol_prefix_char)
+ printf(".globl %c%s\n", symbol_prefix_char, label);
+ else
+ printf(".globl %s\n", label);
+ printf("\tALGN\n");
+ if (symbol_prefix_char)
+ printf("%c%s:\n", symbol_prefix_char, label);
+ else
+ printf("%s:\n", label);
+}
+
+/* uncompress a compressed symbol. When this function is called, the best table
+ * might still be compressed itself, so the function needs to be recursive */
+static int expand_symbol(unsigned char *data, int len, char *result)
+{
+ int c, rlen, total=0;
+
+ while (len) {
+ c = *data;
+ /* if the table holds a single char that is the same as the one
+ * we are looking for, then end the search */
+ if (best_table[c][0]==c && best_table_len[c]==1) {
+ *result++ = c;
+ total++;
+ } else {
+ /* if not, recurse and expand */
+ rlen = expand_symbol(best_table[c], best_table_len[c], result);
+ total += rlen;
+ result += rlen;
+ }
+ data++;
+ len--;
+ }
+ *result=0;
+
+ return total;
+}
+
+static void write_src(void)
+{
+ unsigned int i, k, off;
+ unsigned int best_idx[256];
+ unsigned int *markers;
+ char buf[KSYM_NAME_LEN+1];
+
+ printf("#include <arch/types.h>\n");
+ printf("#if BITS_PER_LONG == 64\n");
+ printf("#define PTR .quad\n");
+ printf("#define ALGN .align 8\n");
+ printf("#else\n");
+ printf("#define PTR .long\n");
+ printf("#define ALGN .align 4\n");
+ printf("#endif\n");
+
+ printf(".data\n");
+
+ output_label("kallsyms_addresses");
+ for (i = 0; i < table_cnt; i++) {
+ printf("\tPTR\t%#llx\n", table[i].addr);
+ }
+ printf("\n");
+
+ output_label("kallsyms_num_syms");
+ printf("\tPTR\t%d\n", table_cnt);
+ printf("\n");
+
+ /* table of offset markers, that give the offset in the compressed stream
+ * every 256 symbols */
+ markers = malloc(sizeof(unsigned int) * ((table_cnt + 255) / 256));
+ if (!markers) {
+ fprintf(stderr, "kallsyms failure: "
+ "unable to allocate required memory\n");
+ exit(EXIT_FAILURE);
+ }
+
+ output_label("kallsyms_names");
+ off = 0;
+ for (i = 0; i < table_cnt; i++) {
+ if ((i & 0xFF) == 0)
+ markers[i >> 8] = off;
+
+ printf("\t.byte 0x%02x", table[i].len);
+ for (k = 0; k < table[i].len; k++)
+ printf(", 0x%02x", table[i].sym[k]);
+ printf("\n");
+
+ off += table[i].len + 1;
+ }
+ printf("\n");
+
+ output_label("kallsyms_markers");
+ for (i = 0; i < ((table_cnt + 255) >> 8); i++)
+ printf("\tPTR\t%d\n", markers[i]);
+ printf("\n");
+
+ free(markers);
+
+ output_label("kallsyms_token_table");
+ off = 0;
+ for (i = 0; i < 256; i++) {
+ best_idx[i] = off;
+ expand_symbol(best_table[i], best_table_len[i], buf);
+ printf("\t.asciz\t\"%s\"\n", buf);
+ off += strlen(buf) + 1;
+ }
+ printf("\n");
+
+ output_label("kallsyms_token_index");
+ for (i = 0; i < 256; i++)
+ printf("\t.short\t%d\n", best_idx[i]);
+ printf("\n");
+}
+
+
+/* table lookup compression functions */
+
+/* count all the possible tokens in a symbol */
+static void learn_symbol(unsigned char *symbol, int len)
+{
+ int i;
+
+ for (i = 0; i < len - 1; i++)
+ token_profit[ symbol[i] + (symbol[i + 1] << 8) ]++;
+}
+
+/* decrease the count for all the possible tokens in a symbol */
+static void forget_symbol(unsigned char *symbol, int len)
+{
+ int i;
+
+ for (i = 0; i < len - 1; i++)
+ token_profit[ symbol[i] + (symbol[i + 1] << 8) ]--;
+}
+
+/* remove all the invalid symbols from the table and do the initial token count */
+static void build_initial_tok_table(void)
+{
+ unsigned int i, pos;
+
+ pos = 0;
+ for (i = 0; i < table_cnt; i++) {
+ if ( symbol_valid(&table[i]) ) {
+ if (pos != i)
+ table[pos] = table[i];
+ learn_symbol(table[pos].sym, table[pos].len);
+ pos++;
+ }
+ }
+ table_cnt = pos;
+}
+
+/* replace a given token in all the valid symbols. Use the sampled symbols
+ * to update the counts */
+static void compress_symbols(unsigned char *str, int idx)
+{
+ unsigned int i, len, size;
+ unsigned char *p1, *p2;
+
+ for (i = 0; i < table_cnt; i++) {
+
+ len = table[i].len;
+ p1 = table[i].sym;
+
+ /* find the token on the symbol */
+ p2 = memmem(p1, len, str, 2);
+ if (!p2) continue;
+
+ /* decrease the counts for this symbol's tokens */
+ forget_symbol(table[i].sym, len);
+
+ size = len;
+
+ do {
+ *p2 = idx;
+ p2++;
+ size -= (p2 - p1);
+ memmove(p2, p2 + 1, size);
+ p1 = p2;
+ len--;
+
+ if (size < 2) break;
+
+ /* find the token on the symbol */
+ p2 = memmem(p1, size, str, 2);
+
+ } while (p2);
+
+ table[i].len = len;
+
+ /* increase the counts for this symbol's new tokens */
+ learn_symbol(table[i].sym, len);
+ }
+}
+
+/* search the token with the maximum profit */
+static int find_best_token(void)
+{
+ int i, best, bestprofit;
+
+ bestprofit=-10000;
+ best = 0;
+
+ for (i = 0; i < 0x10000; i++) {
+ if (token_profit[i] > bestprofit) {
+ best = i;
+ bestprofit = token_profit[i];
+ }
+ }
+ return best;
+}
+
+/* this is the core of the algorithm: calculate the "best" table */
+static void optimize_result(void)
+{
+ int i, best;
+
+ /* using the '\0' symbol last allows compress_symbols to use standard
+ * fast string functions */
+ for (i = 255; i >= 0; i--) {
+
+		/* if this table slot is empty (i.e. it is not used by an
+		 * actual original char code) */
+ if (!best_table_len[i]) {
+
+			/* find the token with the greatest profit value */
+ best = find_best_token();
+
+ /* place it in the "best" table */
+ best_table_len[i] = 2;
+ best_table[i][0] = best & 0xFF;
+ best_table[i][1] = (best >> 8) & 0xFF;
+
+ /* replace this token in all the valid symbols */
+ compress_symbols(best_table[i], i);
+ }
+ }
+}
+
+/* start by placing the symbols that are actually used on the table */
+static void insert_real_symbols_in_table(void)
+{
+ unsigned int i, j, c;
+
+ memset(best_table, 0, sizeof(best_table));
+ memset(best_table_len, 0, sizeof(best_table_len));
+
+ for (i = 0; i < table_cnt; i++) {
+ for (j = 0; j < table[i].len; j++) {
+ c = table[i].sym[j];
+ best_table[c][0]=c;
+ best_table_len[c]=1;
+ }
+ }
+}
+
+static void optimize_token_table(void)
+{
+ build_initial_tok_table();
+
+ insert_real_symbols_in_table();
+
+	/* If no valid symbols were found, exit with an error */
+ if (!table_cnt) {
+ fprintf(stderr, "No valid symbol.\n");
+ exit(1);
+ }
+
+ optimize_result();
+}
+
+
+int main(int argc, char **argv)
+{
+ if (argc >= 2) {
+ int i;
+ for (i = 1; i < argc; i++) {
+ if(strcmp(argv[i], "--all-symbols") == 0)
+ all_symbols = 1;
+ else if (strncmp(argv[i], "--symbol-prefix=", 16) == 0) {
+ char *p = &argv[i][16];
+ /* skip quote */
+ if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\''))
+ p++;
+ symbol_prefix_char = *p;
+ } else
+ usage();
+ }
+ } else if (argc != 1)
+ usage();
+
+ read_map(stdin);
+ optimize_token_table();
+ write_src();
+
+ return 0;
+}
--- /dev/null
+# ===========================================================================
+# Kernel configuration targets
+# These targets are used from top-level makefile
+
+PHONY += oldconfig xconfig gconfig menuconfig config silentoldconfig update-po-config
+
+xconfig: $(obj)/qconf
+ $< arch/$(ARCH)/Kconfig
+
+gconfig: $(obj)/gconf
+ $< arch/$(ARCH)/Kconfig
+
+menuconfig: $(obj)/mconf
+ $(Q)$(MAKE) $(build)=scripts/kconfig/lxdialog
+ $< arch/$(ARCH)/Kconfig
+
+config: $(obj)/conf
+ $< arch/$(ARCH)/Kconfig
+
+oldconfig: $(obj)/conf
+ $< -o arch/$(ARCH)/Kconfig
+
+silentoldconfig: $(obj)/conf
+ $< -s arch/$(ARCH)/Kconfig
+
+update-po-config: $(obj)/kxgettext
+ xgettext --default-domain=linux \
+ --add-comments --keyword=_ --keyword=N_ \
+ --files-from=scripts/kconfig/POTFILES.in \
+ --output scripts/kconfig/config.pot
+ $(Q)ln -fs Kconfig_i386 arch/um/Kconfig_arch
+ $(Q)for i in `ls arch/`; \
+ do \
+ scripts/kconfig/kxgettext arch/$$i/Kconfig \
+ | msguniq -o scripts/kconfig/linux_$${i}.pot; \
+ done
+ $(Q)msgcat scripts/kconfig/config.pot \
+ `find scripts/kconfig/ -type f -name linux_*.pot` \
+ --output scripts/kconfig/linux_raw.pot
+ $(Q)msguniq --sort-by-file scripts/kconfig/linux_raw.pot \
+ --output scripts/kconfig/linux.pot
+ $(Q)rm -f arch/um/Kconfig_arch
+ $(Q)rm -f scripts/kconfig/linux_*.pot scripts/kconfig/config.pot
+
+PHONY += randconfig allyesconfig allnoconfig allmodconfig defconfig
+
+randconfig: $(obj)/conf
+ $< -r arch/$(ARCH)/Kconfig
+
+allyesconfig: $(obj)/conf
+ $< -y arch/$(ARCH)/Kconfig
+
+allnoconfig: $(obj)/conf
+ $< -n arch/$(ARCH)/Kconfig
+
+allmodconfig: $(obj)/conf
+ $< -m arch/$(ARCH)/Kconfig
+
+defconfig: $(obj)/conf
+ifeq ($(KBUILD_DEFCONFIG),)
+ $< -d arch/$(ARCH)/Kconfig
+else
+ @echo *** Default configuration is based on '$(KBUILD_DEFCONFIG)'
+ $(Q)$< -D arch/$(ARCH)/configs/$(KBUILD_DEFCONFIG) arch/$(ARCH)/Kconfig
+endif
+
+%_defconfig: $(obj)/conf
+ $(Q)$< -D arch/$(ARCH)/configs/$@ arch/$(ARCH)/Kconfig
+
+# Help text used by make help
+help:
+ @echo ' config - Update current config utilising a line-oriented program'
+ @echo ' menuconfig - Update current config utilising a menu based program'
+ @echo ' xconfig - Update current config utilising a QT based front-end'
+ @echo ' gconfig - Update current config utilising a GTK based front-end'
+ @echo ' oldconfig - Update current config utilising a provided .config as base'
+ @echo ' randconfig - New config with random answer to all options'
+ @echo ' defconfig - New config with default answer to all options'
+ @echo ' allmodconfig - New config selecting modules when possible'
+ @echo ' allyesconfig - New config where all options are accepted with yes'
+ @echo ' allnoconfig - New config where all options are answered with no'
+
+# ===========================================================================
+# Shared Makefile for the various kconfig executables:
+# conf: Used for defconfig, oldconfig and related targets
+# mconf: Used for the menuconfig target.
+# Utilizes the lxdialog package
+# qconf: Used for the xconfig target
+# Based on QT which needs to be installed to compile it
+# gconf: Used for the gconfig target
+# Based on GTK which needs to be installed to compile it
+# object files used by all kconfig flavours
+
+hostprogs-y := conf mconf qconf gconf kxgettext
+conf-objs := conf.o zconf.tab.o
+mconf-objs := mconf.o zconf.tab.o
+kxgettext-objs := kxgettext.o zconf.tab.o
+
+ifeq ($(MAKECMDGOALS),xconfig)
+ qconf-target := 1
+endif
+ifeq ($(MAKECMDGOALS),gconfig)
+ gconf-target := 1
+endif
+
+
+ifeq ($(qconf-target),1)
+qconf-cxxobjs := qconf.o
+qconf-objs := kconfig_load.o zconf.tab.o
+endif
+
+ifeq ($(gconf-target),1)
+gconf-objs := gconf.o kconfig_load.o zconf.tab.o
+endif
+
+clean-files := lkc_defs.h qconf.moc .tmp_qtcheck \
+ .tmp_gtkcheck zconf.tab.c lex.zconf.c zconf.hash.c
+subdir- += lxdialog
+
+# Needed for systems without gettext
+KBUILD_HAVE_NLS := $(shell \
+ if echo "\#include <libintl.h>" | $(HOSTCC) $(HOSTCFLAGS) -E - > /dev/null 2>&1 ; \
+ then echo yes ; \
+ else echo no ; fi)
+ifeq ($(KBUILD_HAVE_NLS),no)
+HOSTCFLAGS += -DKBUILD_NO_NLS
+endif
+
+# generated files seem to need this to find local include files
+HOSTCFLAGS_lex.zconf.o := -I$(src)
+HOSTCFLAGS_zconf.tab.o := -I$(src)
+
+HOSTLOADLIBES_qconf = $(KC_QT_LIBS) -ldl
+HOSTCXXFLAGS_qconf.o = $(KC_QT_CFLAGS) -D LKC_DIRECT_LINK
+
+HOSTLOADLIBES_gconf = `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0`
+HOSTCFLAGS_gconf.o = `pkg-config --cflags gtk+-2.0 gmodule-2.0 libglade-2.0` \
+ -D LKC_DIRECT_LINK
+
+$(obj)/qconf.o: $(obj)/.tmp_qtcheck
+
+ifeq ($(qconf-target),1)
+$(obj)/.tmp_qtcheck: $(src)/Makefile
+-include $(obj)/.tmp_qtcheck
+
+# QT needs some extra effort...
+$(obj)/.tmp_qtcheck:
+ @set -e; echo " CHECK qt"; dir=""; pkg=""; \
+ pkg-config --exists qt 2> /dev/null && pkg=qt; \
+ pkg-config --exists qt-mt 2> /dev/null && pkg=qt-mt; \
+ if [ -n "$$pkg" ]; then \
+ cflags="\$$(shell pkg-config $$pkg --cflags)"; \
+ libs="\$$(shell pkg-config $$pkg --libs)"; \
+ moc="\$$(shell pkg-config $$pkg --variable=prefix)/bin/moc"; \
+ dir="$$(pkg-config $$pkg --variable=prefix)"; \
+ else \
+ for d in $$QTDIR /usr/share/qt* /usr/lib/qt*; do \
+ if [ -f $$d/include/qconfig.h ]; then dir=$$d; break; fi; \
+ done; \
+ if [ -z "$$dir" ]; then \
+ echo "*"; \
+ echo "* Unable to find the QT installation. Please make sure that"; \
+ echo "* the QT development package is correctly installed and"; \
+ echo "* either install pkg-config or set the QTDIR environment"; \
+ echo "* variable to the correct location."; \
+ echo "*"; \
+ false; \
+ fi; \
+ libpath=$$dir/lib; lib=qt; osdir=""; \
+ $(HOSTCXX) -print-multi-os-directory > /dev/null 2>&1 && \
+ osdir=x$$($(HOSTCXX) -print-multi-os-directory); \
+ test -d $$libpath/$$osdir && libpath=$$libpath/$$osdir; \
+ test -f $$libpath/libqt-mt.so && lib=qt-mt; \
+ cflags="-I$$dir/include"; \
+ libs="-L$$libpath -Wl,-rpath,$$libpath -l$$lib"; \
+ moc="$$dir/bin/moc"; \
+ fi; \
+ if [ ! -x $$dir/bin/moc -a -x /usr/bin/moc ]; then \
+ echo "*"; \
+ echo "* Unable to find $$dir/bin/moc, using /usr/bin/moc instead."; \
+ echo "*"; \
+ moc="/usr/bin/moc"; \
+ fi; \
+ echo "KC_QT_CFLAGS=$$cflags" > $@; \
+ echo "KC_QT_LIBS=$$libs" >> $@; \
+ echo "KC_QT_MOC=$$moc" >> $@
+endif
+
+$(obj)/gconf.o: $(obj)/.tmp_gtkcheck
+
+ifeq ($(gconf-target),1)
+-include $(obj)/.tmp_gtkcheck
+
+# GTK needs some extra effort, too...
+$(obj)/.tmp_gtkcheck:
+ @if `pkg-config --exists gtk+-2.0 gmodule-2.0 libglade-2.0`; then \
+ if `pkg-config --atleast-version=2.0.0 gtk+-2.0`; then \
+ touch $@; \
+ else \
+ echo "*"; \
+ echo "* GTK+ is present but version >= 2.0.0 is required."; \
+ echo "*"; \
+ false; \
+ fi \
+ else \
+ echo "*"; \
+ echo "* Unable to find the GTK+ installation. Please make sure that"; \
+ echo "* the GTK+ 2.0 development package is correctly installed..."; \
+ echo "* You need gtk+-2.0, glib-2.0 and libglade-2.0."; \
+ echo "*"; \
+ false; \
+ fi
+endif
+
+$(obj)/zconf.tab.o: $(obj)/lex.zconf.c $(obj)/zconf.hash.c
+
+$(obj)/kconfig_load.o: $(obj)/lkc_defs.h
+
+$(obj)/qconf.o: $(obj)/qconf.moc $(obj)/lkc_defs.h
+
+$(obj)/gconf.o: $(obj)/lkc_defs.h
+
+$(obj)/%.moc: $(src)/%.h
+ $(KC_QT_MOC) -i $< -o $@
+
+$(obj)/lkc_defs.h: $(src)/lkc_proto.h
+ sed < $< > $@ 's/P(\([^,]*\),.*/#define \1 (\*\1_p)/'
+
+
+###
+# The following requires flex/bison/gperf
+# By default we use the _shipped versions, uncomment the following line if
+# you are modifying the flex/bison src.
+# LKC_GENPARSER := 1
+
+ifdef LKC_GENPARSER
+
+$(obj)/zconf.tab.c: $(src)/zconf.y
+$(obj)/lex.zconf.c: $(src)/zconf.l
+$(obj)/zconf.hash.c: $(src)/zconf.gperf
+
+%.tab.c: %.y
+ bison -l -b $* -p $(notdir $*) $<
+ cp $@ $@_shipped
+
+lex.%.c: %.l
+ flex -L -P$(notdir $*) -o$@ $<
+ cp $@ $@_shipped
+
+%.hash.c: %.gperf
+ gperf < $< > $@
+ cp $@ $@_shipped
+
+endif
--- /dev/null
+scripts/kconfig/mconf.c
+scripts/kconfig/conf.c
+scripts/kconfig/confdata.c
+scripts/kconfig/gconf.c
+scripts/kconfig/qconf.cc
--- /dev/null
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <time.h>
+#include <sys/stat.h>
+
+#define LKC_DIRECT_LINK
+#include "lkc.h"
+
+static void conf(struct menu *menu);
+static void check_conf(struct menu *menu);
+
+enum {
+ ask_all,
+ ask_new,
+ ask_silent,
+ set_default,
+ set_yes,
+ set_mod,
+ set_no,
+ set_random
+} input_mode = ask_all;
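+/* For instance, "conf -o <Kconfig>" (as the oldconfig make target invokes
+ * it) runs in ask_new mode; "-s" selects ask_silent, "-d" set_default,
+ * "-r" set_random, and so on (see main() below). */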
+char *defconfig_file;
+
+static int indent = 1;
+static int valid_stdin = 1;
+static int conf_cnt;
+static char line[128];
+static struct menu *rootEntry;
+
+static char nohelp_text[] = N_("Sorry, no help available for this option yet.\n");
+
+static void strip(char *str)
+{
+ char *p = str;
+ int l;
+
+ while ((isspace(*p)))
+ p++;
+ l = strlen(p);
+ if (p != str)
+ memmove(str, p, l + 1);
+ if (!l)
+ return;
+ p = str + l - 1;
+ while ((isspace(*p)))
+ *p-- = 0;
+}
+
+static void check_stdin(void)
+{
+ if (!valid_stdin && input_mode == ask_silent) {
+ printf(_("aborted!\n\n"));
+ printf(_("Console input/output is redirected. "));
+ printf(_("Run 'make oldconfig' to update configuration.\n\n"));
+ exit(1);
+ }
+}
+
+static char *fgets_check_stream(char *s, int size, FILE *stream)
+{
+ char *ret = fgets(s, size, stream);
+
+ if (ret == NULL && feof(stream)) {
+ printf(_("aborted!\n\n"));
+ printf(_("Console input is closed. "));
+ printf(_("Run 'make oldconfig' to update configuration.\n\n"));
+ exit(1);
+ }
+
+ return ret;
+}
+
+static void conf_askvalue(struct symbol *sym, const char *def)
+{
+ enum symbol_type type = sym_get_type(sym);
+ tristate val;
+
+ if (!sym_has_value(sym))
+ printf("(NEW) ");
+
+ line[0] = '\n';
+ line[1] = 0;
+
+ if (!sym_is_changable(sym)) {
+ printf("%s\n", def);
+ line[0] = '\n';
+ line[1] = 0;
+ return;
+ }
+
+ switch (input_mode) {
+ case set_no:
+ case set_mod:
+ case set_yes:
+ case set_random:
+ if (sym_has_value(sym)) {
+ printf("%s\n", def);
+ return;
+ }
+ break;
+ case ask_new:
+ case ask_silent:
+ if (sym_has_value(sym)) {
+ printf("%s\n", def);
+ return;
+ }
+ check_stdin();
+ case ask_all:
+ fflush(stdout);
+ fgets_check_stream(line, 128, stdin);
+ return;
+ case set_default:
+ printf("%s\n", def);
+ return;
+ default:
+ break;
+ }
+
+ switch (type) {
+ case S_INT:
+ case S_HEX:
+ case S_STRING:
+ printf("%s\n", def);
+ return;
+ default:
+ ;
+ }
+ switch (input_mode) {
+ case set_yes:
+ if (sym_tristate_within_range(sym, yes)) {
+ line[0] = 'y';
+ line[1] = '\n';
+ line[2] = 0;
+ break;
+ }
+ case set_mod:
+ if (type == S_TRISTATE) {
+ if (sym_tristate_within_range(sym, mod)) {
+ line[0] = 'm';
+ line[1] = '\n';
+ line[2] = 0;
+ break;
+ }
+ } else {
+ if (sym_tristate_within_range(sym, yes)) {
+ line[0] = 'y';
+ line[1] = '\n';
+ line[2] = 0;
+ break;
+ }
+ }
+ case set_no:
+ if (sym_tristate_within_range(sym, no)) {
+ line[0] = 'n';
+ line[1] = '\n';
+ line[2] = 0;
+ break;
+ }
+ case set_random:
+ do {
+ val = (tristate)(random() % 3);
+ } while (!sym_tristate_within_range(sym, val));
+ switch (val) {
+ case no: line[0] = 'n'; break;
+ case mod: line[0] = 'm'; break;
+ case yes: line[0] = 'y'; break;
+ }
+ line[1] = '\n';
+ line[2] = 0;
+ break;
+ default:
+ break;
+ }
+ printf("%s", line);
+}
+
+int conf_string(struct menu *menu)
+{
+ struct symbol *sym = menu->sym;
+ const char *def, *help;
+
+ while (1) {
+ printf("%*s%s ", indent - 1, "", menu->prompt->text);
+ printf("(%s) ", sym->name);
+ def = sym_get_string_value(sym);
+ if (sym_get_string_value(sym))
+ printf("[%s] ", def);
+ conf_askvalue(sym, def);
+ switch (line[0]) {
+ case '\n':
+ break;
+ case '?':
+ /* print help */
+ if (line[1] == '\n') {
+ help = nohelp_text;
+ if (menu->sym->help)
+ help = menu->sym->help;
+				printf("\n%s\n", help);
+ def = NULL;
+ break;
+ }
+ default:
+ line[strlen(line)-1] = 0;
+ def = line;
+ }
+ if (def && sym_set_string_value(sym, def))
+ return 0;
+ }
+}
+
+static int conf_sym(struct menu *menu)
+{
+ struct symbol *sym = menu->sym;
+ int type;
+ tristate oldval, newval;
+ const char *help;
+
+ while (1) {
+ printf("%*s%s ", indent - 1, "", menu->prompt->text);
+ if (sym->name)
+ printf("(%s) ", sym->name);
+ type = sym_get_type(sym);
+ putchar('[');
+ oldval = sym_get_tristate_value(sym);
+ switch (oldval) {
+ case no:
+ putchar('N');
+ break;
+ case mod:
+ putchar('M');
+ break;
+ case yes:
+ putchar('Y');
+ break;
+ }
+ if (oldval != no && sym_tristate_within_range(sym, no))
+ printf("/n");
+ if (oldval != mod && sym_tristate_within_range(sym, mod))
+ printf("/m");
+ if (oldval != yes && sym_tristate_within_range(sym, yes))
+ printf("/y");
+ if (sym->help)
+ printf("/?");
+ printf("] ");
+ conf_askvalue(sym, sym_get_string_value(sym));
+ strip(line);
+
+ switch (line[0]) {
+ case 'n':
+ case 'N':
+ newval = no;
+ if (!line[1] || !strcmp(&line[1], "o"))
+ break;
+ continue;
+ case 'm':
+ case 'M':
+ newval = mod;
+ if (!line[1])
+ break;
+ continue;
+ case 'y':
+ case 'Y':
+ newval = yes;
+ if (!line[1] || !strcmp(&line[1], "es"))
+ break;
+ continue;
+ case 0:
+ newval = oldval;
+ break;
+ case '?':
+ goto help;
+ default:
+ continue;
+ }
+ if (sym_set_tristate_value(sym, newval))
+ return 0;
+help:
+ help = nohelp_text;
+ if (sym->help)
+ help = sym->help;
+ printf("\n%s\n", help);
+ }
+}
+
+static int conf_choice(struct menu *menu)
+{
+ struct symbol *sym, *def_sym;
+ struct menu *child;
+ int type;
+ bool is_new;
+
+ sym = menu->sym;
+ type = sym_get_type(sym);
+ is_new = !sym_has_value(sym);
+ if (sym_is_changable(sym)) {
+ conf_sym(menu);
+ sym_calc_value(sym);
+ switch (sym_get_tristate_value(sym)) {
+ case no:
+ return 1;
+ case mod:
+ return 0;
+ case yes:
+ break;
+ }
+ } else {
+ switch (sym_get_tristate_value(sym)) {
+ case no:
+ return 1;
+ case mod:
+ printf("%*s%s\n", indent - 1, "", menu_get_prompt(menu));
+ return 0;
+ case yes:
+ break;
+ }
+ }
+
+ while (1) {
+ int cnt, def;
+
+ printf("%*s%s\n", indent - 1, "", menu_get_prompt(menu));
+ def_sym = sym_get_choice_value(sym);
+ cnt = def = 0;
+ line[0] = 0;
+ for (child = menu->list; child; child = child->next) {
+ if (!menu_is_visible(child))
+ continue;
+ if (!child->sym) {
+ printf("%*c %s\n", indent, '*', menu_get_prompt(child));
+ continue;
+ }
+ cnt++;
+ if (child->sym == def_sym) {
+ def = cnt;
+ printf("%*c", indent, '>');
+ } else
+ printf("%*c", indent, ' ');
+ printf(" %d. %s", cnt, menu_get_prompt(child));
+ if (child->sym->name)
+ printf(" (%s)", child->sym->name);
+ if (!sym_has_value(child->sym))
+ printf(" (NEW)");
+ printf("\n");
+ }
+ printf("%*schoice", indent - 1, "");
+ if (cnt == 1) {
+ printf("[1]: 1\n");
+ goto conf_childs;
+ }
+ printf("[1-%d", cnt);
+ if (sym->help)
+ printf("?");
+ printf("]: ");
+ switch (input_mode) {
+ case ask_new:
+ case ask_silent:
+ if (!is_new) {
+ cnt = def;
+ printf("%d\n", cnt);
+ break;
+ }
+ check_stdin();
+ case ask_all:
+ fflush(stdout);
+ fgets_check_stream(line, 128, stdin);
+ strip(line);
+ if (line[0] == '?') {
+ printf("\n%s\n", menu->sym->help ?
+ menu->sym->help : nohelp_text);
+ continue;
+ }
+ if (!line[0])
+ cnt = def;
+ else if (isdigit(line[0]))
+ cnt = atoi(line);
+ else
+ continue;
+ break;
+ case set_random:
+ def = (random() % cnt) + 1;
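+ /* fall through: use the randomly picked entry as the default */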
+ case set_default:
+ case set_yes:
+ case set_mod:
+ case set_no:
+ cnt = def;
+ printf("%d\n", cnt);
+ break;
+ }
+
+ conf_childs:
+ for (child = menu->list; child; child = child->next) {
+ if (!child->sym || !menu_is_visible(child))
+ continue;
+ if (!--cnt)
+ break;
+ }
+ if (!child)
+ continue;
+ if (line[0] && line[strlen(line) - 1] == '?') {
+ printf("\n%s\n", child->sym->help ?
+ child->sym->help : nohelp_text);
+ continue;
+ }
+ sym_set_choice_value(sym, child->sym);
+ if (child->list) {
+ indent += 2;
+ conf(child->list);
+ indent -= 2;
+ }
+ return 1;
+ }
+}
+
+static void conf(struct menu *menu)
+{
+ struct symbol *sym;
+ struct property *prop;
+ struct menu *child;
+
+ if (!menu_is_visible(menu))
+ return;
+
+ sym = menu->sym;
+ prop = menu->prompt;
+ if (prop) {
+ const char *prompt;
+
+ switch (prop->type) {
+ case P_MENU:
+ if (input_mode == ask_silent && rootEntry != menu) {
+ check_conf(menu);
+ return;
+ }
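+ /* fall through: menu prompts are printed like comments */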
+ case P_COMMENT:
+ prompt = menu_get_prompt(menu);
+ if (prompt)
+ printf("%*c\n%*c %s\n%*c\n",
+ indent, '*',
+ indent, '*', prompt,
+ indent, '*');
+ default:
+ ;
+ }
+ }
+
+ if (!sym)
+ goto conf_childs;
+
+ if (sym_is_choice(sym)) {
+ conf_choice(menu);
+ if (sym->curr.tri != mod)
+ return;
+ goto conf_childs;
+ }
+
+ switch (sym->type) {
+ case S_INT:
+ case S_HEX:
+ case S_STRING:
+ conf_string(menu);
+ break;
+ default:
+ conf_sym(menu);
+ break;
+ }
+
+conf_childs:
+ if (sym)
+ indent += 2;
+ for (child = menu->list; child; child = child->next)
+ conf(child);
+ if (sym)
+ indent -= 2;
+}
+
+static void check_conf(struct menu *menu)
+{
+ struct symbol *sym;
+ struct menu *child;
+
+ if (!menu_is_visible(menu))
+ return;
+
+ sym = menu->sym;
+ if (sym && !sym_has_value(sym)) {
+ if (sym_is_changable(sym) ||
+ (sym_is_choice(sym) && sym_get_tristate_value(sym) == yes)) {
+ if (!conf_cnt++)
+ printf(_("*\n* Restart config...\n*\n"));
+ rootEntry = menu_get_parent_menu(menu);
+ conf(rootEntry);
+ }
+ }
+
+ for (child = menu->list; child; child = child->next)
+ check_conf(child);
+}
+
+int main(int ac, char **av)
+{
+ int i = 1;
+ const char *name;
+ struct stat tmpstat;
+
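+ /* Single-letter mode switch: -o (ask about new symbols only), -s (silent
+  * update), -d (use defaults), -D <file> (named defconfig), -n/-m/-y (set
+  * everything to no/mod/yes), -r (random values), -h/-? (usage). */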
+ if (ac > i && av[i][0] == '-') {
+ switch (av[i++][1]) {
+ case 'o':
+ input_mode = ask_new;
+ break;
+ case 's':
+ input_mode = ask_silent;
+ valid_stdin = isatty(0) && isatty(1) && isatty(2);
+ break;
+ case 'd':
+ input_mode = set_default;
+ break;
+ case 'D':
+ input_mode = set_default;
+ defconfig_file = av[i++];
+ if (!defconfig_file) {
+ printf(_("%s: No default config file specified\n"),
+ av[0]);
+ exit(1);
+ }
+ break;
+ case 'n':
+ input_mode = set_no;
+ break;
+ case 'm':
+ input_mode = set_mod;
+ break;
+ case 'y':
+ input_mode = set_yes;
+ break;
+ case 'r':
+ input_mode = set_random;
+ srandom(time(NULL));
+ break;
+ case 'h':
+ case '?':
+ fprintf(stderr, "See README for usage info\n");
+ exit(0);
+ }
+ }
+ name = av[i];
+ if (!name) {
+ printf(_("%s: Kconfig file missing\n"), av[0]);
+ }
+ conf_parse(name);
+ //zconfdump(stdout);
+ switch (input_mode) {
+ case set_default:
+ if (!defconfig_file)
+ defconfig_file = conf_get_default_confname();
+ if (conf_read(defconfig_file)) {
+ printf("***\n"
+ "*** Can't find default configuration \"%s\"!\n"
+ "***\n", defconfig_file);
+ exit(1);
+ }
+ break;
+ case ask_silent:
+ if (stat(".config", &tmpstat)) {
+ printf(_("***\n"
+ "*** You have not yet configured your kernel!\n"
+ "***\n"
+ "*** Please run some configurator (e.g. \"make oldconfig\" or\n"
+ "*** \"make menuconfig\" or \"make xconfig\").\n"
+ "***\n"));
+ exit(1);
+ }
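+ /* fall through: read the existing .config like ask_all/ask_new */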
+ case ask_all:
+ case ask_new:
+ conf_read(NULL);
+ break;
+ case set_no:
+ case set_mod:
+ case set_yes:
+ case set_random:
+ name = getenv("KCONFIG_ALLCONFIG");
+ if (name && !stat(name, &tmpstat)) {
+ conf_read_simple(name);
+ break;
+ }
+ switch (input_mode) {
+ case set_no: name = "allno.config"; break;
+ case set_mod: name = "allmod.config"; break;
+ case set_yes: name = "allyes.config"; break;
+ case set_random: name = "allrandom.config"; break;
+ default: break;
+ }
+ if (!stat(name, &tmpstat))
+ conf_read_simple(name);
+ else if (!stat("all.config", &tmpstat))
+ conf_read_simple("all.config");
+ break;
+ default:
+ break;
+ }
+
+ if (input_mode != ask_silent) {
+ rootEntry = &rootmenu;
+ conf(&rootmenu);
+ if (input_mode == ask_all) {
+ input_mode = ask_silent;
+ valid_stdin = 1;
+ }
+ }
+ do {
+ conf_cnt = 0;
+ check_conf(&rootmenu);
+ } while (conf_cnt);
+ if (conf_write(NULL)) {
+ fprintf(stderr, _("\n*** Error during writing of the kernel configuration.\n\n"));
+ return 1;
+ }
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#include <sys/stat.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+#define LKC_DIRECT_LINK
+#include "lkc.h"
+
+static void conf_warning(const char *fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+static const char *conf_filename;
+static int conf_lineno, conf_warnings, conf_unsaved;
+
+const char conf_def_filename[] = ".config";
+
+const char conf_defname[] = "arch/$ARCH/defconfig";
+
+const char *conf_confnames[] = {
+ ".config",
+ conf_defname,
+ NULL,
+};
+
+static void conf_warning(const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ fprintf(stderr, "%s:%d:warning: ", conf_filename, conf_lineno);
+ vfprintf(stderr, fmt, ap);
+ fprintf(stderr, "\n");
+ va_end(ap);
+ conf_warnings++;
+}
+
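+/* Expand $NAME references in a string (e.g. "arch/$ARCH/defconfig") with the
+ * current value of each named symbol. The result is kept in a static buffer
+ * and is only valid until the next call. */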
+static char *conf_expand_value(const char *in)
+{
+ struct symbol *sym;
+ const char *src;
+ static char res_value[SYMBOL_MAXLENGTH];
+ char *dst, name[SYMBOL_MAXLENGTH];
+
+ res_value[0] = 0;
+ dst = name;
+ while ((src = strchr(in, '$'))) {
+ strncat(res_value, in, src - in);
+ src++;
+ dst = name;
+ while (isalnum(*src) || *src == '_')
+ *dst++ = *src++;
+ *dst = 0;
+ sym = sym_lookup(name, 0);
+ sym_calc_value(sym);
+ strcat(res_value, sym_get_string_value(sym));
+ in = src;
+ }
+ strcat(res_value, in);
+
+ return res_value;
+}
+
+char *conf_get_default_confname(void)
+{
+ struct stat buf;
+ static char fullname[PATH_MAX+1];
+ char *env, *name;
+
+ name = conf_expand_value(conf_defname);
+ env = getenv(SRCTREE);
+ if (env) {
+ sprintf(fullname, "%s/%s", env, name);
+ if (!stat(fullname, &buf))
+ return fullname;
+ }
+ return name;
+}
+
+int conf_read_simple(const char *name)
+{
+ FILE *in = NULL;
+ char line[1024];
+ char *p, *p2;
+ struct symbol *sym;
+ int i;
+
+ if (name) {
+ in = zconf_fopen(name);
+ } else {
+ const char **names = conf_confnames;
+ while ((name = *names++)) {
+ name = conf_expand_value(name);
+ in = zconf_fopen(name);
+ if (in) {
+ printf(_("#\n"
+ "# using defaults found in %s\n"
+ "#\n"), name);
+ break;
+ }
+ }
+ }
+ if (!in)
+ return 1;
+
+ conf_filename = name;
+ conf_lineno = 0;
+ conf_warnings = 0;
+ conf_unsaved = 0;
+
+ for_all_symbols(i, sym) {
+ sym->flags |= SYMBOL_NEW | SYMBOL_CHANGED;
+ if (sym_is_choice(sym))
+ sym->flags &= ~SYMBOL_NEW;
+ sym->flags &= ~SYMBOL_VALID;
+ switch (sym->type) {
+ case S_INT:
+ case S_HEX:
+ case S_STRING:
+ if (sym->user.val)
+ free(sym->user.val);
+ default:
+ sym->user.val = NULL;
+ sym->user.tri = no;
+ }
+ }
+
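+ /* Each line of the config file is expected to be one of:
+  *   # CONFIG_FOO is not set
+  *   CONFIG_FOO=y|m|n
+  *   CONFIG_FOO="some string"   or   CONFIG_FOO=<int/hex value>
+  * anything else triggers a warning. */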
+ while (fgets(line, sizeof(line), in)) {
+ conf_lineno++;
+ sym = NULL;
+ switch (line[0]) {
+ case '#':
+ if (memcmp(line + 2, "CONFIG_", 7))
+ continue;
+ p = strchr(line + 9, ' ');
+ if (!p)
+ continue;
+ *p++ = 0;
+ if (strncmp(p, "is not set", 10))
+ continue;
+ sym = sym_find(line + 9);
+ if (!sym) {
+ conf_warning("trying to assign nonexistent symbol %s", line + 9);
+ break;
+ } else if (!(sym->flags & SYMBOL_NEW)) {
+ conf_warning("trying to reassign symbol %s", sym->name);
+ break;
+ }
+ switch (sym->type) {
+ case S_BOOLEAN:
+ case S_TRISTATE:
+ sym->user.tri = no;
+ sym->flags &= ~SYMBOL_NEW;
+ break;
+ default:
+ ;
+ }
+ break;
+ case 'C':
+ if (memcmp(line, "CONFIG_", 7)) {
+ conf_warning("unexpected data");
+ continue;
+ }
+ p = strchr(line + 7, '=');
+ if (!p)
+ continue;
+ *p++ = 0;
+ p2 = strchr(p, '\n');
+ if (p2)
+ *p2 = 0;
+ sym = sym_find(line + 7);
+ if (!sym) {
+ conf_warning("trying to assign nonexistent symbol %s", line + 7);
+ break;
+ } else if (!(sym->flags & SYMBOL_NEW)) {
+ conf_warning("trying to reassign symbol %s", sym->name);
+ break;
+ }
+ switch (sym->type) {
+ case S_TRISTATE:
+ if (p[0] == 'm') {
+ sym->user.tri = mod;
+ sym->flags &= ~SYMBOL_NEW;
+ break;
+ }
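+ /* fall through: 'y' and 'n' are handled by the boolean case */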
+ case S_BOOLEAN:
+ if (p[0] == 'y') {
+ sym->user.tri = yes;
+ sym->flags &= ~SYMBOL_NEW;
+ break;
+ }
+ if (p[0] == 'n') {
+ sym->user.tri = no;
+ sym->flags &= ~SYMBOL_NEW;
+ break;
+ }
+ conf_warning("symbol value '%s' invalid for %s", p, sym->name);
+ break;
+ case S_STRING:
+ if (*p++ != '"')
+ break;
+ for (p2 = p; (p2 = strpbrk(p2, "\"\\")); p2++) {
+ if (*p2 == '"') {
+ *p2 = 0;
+ break;
+ }
+ memmove(p2, p2 + 1, strlen(p2));
+ }
+ if (!p2) {
+ conf_warning("invalid string found");
+ continue;
+ }
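+ /* fall through: store the unquoted string via the int/hex path */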
+ case S_INT:
+ case S_HEX:
+ if (sym_string_valid(sym, p)) {
+ sym->user.val = strdup(p);
+ sym->flags &= ~SYMBOL_NEW;
+ } else {
+ conf_warning("symbol value '%s' invalid for %s", p, sym->name);
+ continue;
+ }
+ break;
+ default:
+ ;
+ }
+ break;
+ case '\n':
+ break;
+ default:
+ conf_warning("unexpected data");
+ continue;
+ }
+ if (sym && sym_is_choice_value(sym)) {
+ struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym));
+ switch (sym->user.tri) {
+ case no:
+ break;
+ case mod:
+ if (cs->user.tri == yes) {
+ conf_warning("%s creates inconsistent choice state", sym->name);
+ cs->flags |= SYMBOL_NEW;
+ }
+ break;
+ case yes:
+ if (cs->user.tri != no) {
+ conf_warning("%s creates inconsistent choice state", sym->name);
+ cs->flags |= SYMBOL_NEW;
+ } else
+ cs->user.val = sym;
+ break;
+ }
+ cs->user.tri = E_OR(cs->user.tri, sym->user.tri);
+ }
+ }
+ fclose(in);
+
+ if (modules_sym)
+ sym_calc_value(modules_sym);
+ return 0;
+}
+
+int conf_read(const char *name)
+{
+ struct symbol *sym;
+ struct property *prop;
+ struct expr *e;
+ int i;
+
+ if (conf_read_simple(name))
+ return 1;
+
+ for_all_symbols(i, sym) {
+ sym_calc_value(sym);
+ if (sym_is_choice(sym) || (sym->flags & SYMBOL_AUTO))
+ goto sym_ok;
+ if (sym_has_value(sym) && (sym->flags & SYMBOL_WRITE)) {
+ /* check that calculated value agrees with saved value */
+ switch (sym->type) {
+ case S_BOOLEAN:
+ case S_TRISTATE:
+ if (sym->user.tri != sym_get_tristate_value(sym))
+ break;
+ if (!sym_is_choice(sym))
+ goto sym_ok;
+ default:
+ if (!strcmp(sym->curr.val, sym->user.val))
+ goto sym_ok;
+ break;
+ }
+ } else if (!sym_has_value(sym) && !(sym->flags & SYMBOL_WRITE))
+ /* no previous value and not saved */
+ goto sym_ok;
+ conf_unsaved++;
+ /* maybe print value in verbose mode... */
+ sym_ok:
+ if (sym_has_value(sym) && !sym_is_choice_value(sym)) {
+ if (sym->visible == no)
+ sym->flags |= SYMBOL_NEW;
+ switch (sym->type) {
+ case S_STRING:
+ case S_INT:
+ case S_HEX:
+ if (!sym_string_within_range(sym, sym->user.val)) {
+ sym->flags |= SYMBOL_NEW;
+ sym->flags &= ~SYMBOL_VALID;
+ }
+ default:
+ break;
+ }
+ }
+ if (!sym_is_choice(sym))
+ continue;
+ prop = sym_get_choice_prop(sym);
+ for (e = prop->expr; e; e = e->left.expr)
+ if (e->right.sym->visible != no)
+ sym->flags |= e->right.sym->flags & SYMBOL_NEW;
+ }
+
+ sym_change_count = conf_warnings || conf_unsaved;
+
+ return 0;
+}
+
+int conf_write(const char *name)
+{
+ FILE *out, *out_h;
+ struct symbol *sym;
+ struct menu *menu;
+ const char *basename;
+ char dirname[128], tmpname[128], newname[128];
+ int type, l;
+ const char *str;
+ time_t now;
+ int use_timestamp = 1;
+ char *env;
+
+ dirname[0] = 0;
+ if (name && name[0]) {
+ struct stat st;
+ char *slash;
+
+ if (!stat(name, &st) && S_ISDIR(st.st_mode)) {
+ strcpy(dirname, name);
+ strcat(dirname, "/");
+ basename = conf_def_filename;
+ } else if ((slash = strrchr(name, '/'))) {
+ int size = slash - name + 1;
+ memcpy(dirname, name, size);
+ dirname[size] = 0;
+ if (slash[1])
+ basename = slash + 1;
+ else
+ basename = conf_def_filename;
+ } else
+ basename = name;
+ } else
+ basename = conf_def_filename;
+
+ sprintf(newname, "%s.tmpconfig.%d", dirname, (int)getpid());
+ out = fopen(newname, "w");
+ if (!out)
+ return 1;
+ out_h = NULL;
+ if (!name) {
+ out_h = fopen(".tmpconfig.h", "w");
+ if (!out_h)
+ return 1;
+ file_write_dep(NULL);
+ }
+ sym = sym_lookup("KERNELVERSION", 0);
+ sym_calc_value(sym);
+ time(&now);
+ env = getenv("KCONFIG_NOTIMESTAMP");
+ if (env && *env)
+ use_timestamp = 0;
+
+ fprintf(out, _("#\n"
+ "# Automatically generated make config: don't edit\n"
+ "# LWK kernel version: %s\n"
+ "%s%s"
+ "#\n"),
+ sym_get_string_value(sym),
+ use_timestamp ? "# " : "",
+ use_timestamp ? ctime(&now) : "");
+ if (out_h)
+ fprintf(out_h, "/*\n"
+ " * Automatically generated C config: don't edit\n"
+ " * LWK kernel version: %s\n"
+ "%s%s"
+ " */\n"
+ "#define AUTOCONF_INCLUDED\n",
+ sym_get_string_value(sym),
+ use_timestamp ? " * " : "",
+ use_timestamp ? ctime(&now) : "");
+
+ if (!sym_change_count)
+ sym_clear_all_valid();
+
+ menu = rootmenu.list;
+ while (menu) {
+ sym = menu->sym;
+ if (!sym) {
+ if (!menu_is_visible(menu))
+ goto next;
+ str = menu_get_prompt(menu);
+ fprintf(out, "\n"
+ "#\n"
+ "# %s\n"
+ "#\n", str);
+ if (out_h)
+ fprintf(out_h, "\n"
+ "/*\n"
+ " * %s\n"
+ " */\n", str);
+ } else if (!(sym->flags & SYMBOL_CHOICE)) {
+ sym_calc_value(sym);
+ if (!(sym->flags & SYMBOL_WRITE))
+ goto next;
+ sym->flags &= ~SYMBOL_WRITE;
+ type = sym->type;
+ if (type == S_TRISTATE) {
+ sym_calc_value(modules_sym);
+ if (modules_sym->curr.tri == no)
+ type = S_BOOLEAN;
+ }
+ switch (type) {
+ case S_BOOLEAN:
+ case S_TRISTATE:
+ switch (sym_get_tristate_value(sym)) {
+ case no:
+ fprintf(out, "# CONFIG_%s is not set\n", sym->name);
+ if (out_h)
+ fprintf(out_h, "#undef CONFIG_%s\n", sym->name);
+ break;
+ case mod:
+ fprintf(out, "CONFIG_%s=m\n", sym->name);
+ if (out_h)
+ fprintf(out_h, "#define CONFIG_%s_MODULE 1\n", sym->name);
+ break;
+ case yes:
+ fprintf(out, "CONFIG_%s=y\n", sym->name);
+ if (out_h)
+ fprintf(out_h, "#define CONFIG_%s 1\n", sym->name);
+ break;
+ }
+ break;
+ case S_STRING:
+ // fix me
+ str = sym_get_string_value(sym);
+ fprintf(out, "CONFIG_%s=\"", sym->name);
+ if (out_h)
+ fprintf(out_h, "#define CONFIG_%s \"", sym->name);
+ do {
+ l = strcspn(str, "\"\\");
+ if (l) {
+ fwrite(str, l, 1, out);
+ if (out_h)
+ fwrite(str, l, 1, out_h);
+ }
+ str += l;
+ while (*str == '\\' || *str == '"') {
+ fprintf(out, "\\%c", *str);
+ if (out_h)
+ fprintf(out_h, "\\%c", *str);
+ str++;
+ }
+ } while (*str);
+ fputs("\"\n", out);
+ if (out_h)
+ fputs("\"\n", out_h);
+ break;
+ case S_HEX:
+ str = sym_get_string_value(sym);
+ if (str[0] != '0' || (str[1] != 'x' && str[1] != 'X')) {
+ fprintf(out, "CONFIG_%s=%s\n", sym->name, str);
+ if (out_h)
+ fprintf(out_h, "#define CONFIG_%s 0x%s\n", sym->name, str);
+ break;
+ }
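+ /* fall through: values already starting with 0x/0X are written verbatim */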
+ case S_INT:
+ str = sym_get_string_value(sym);
+ fprintf(out, "CONFIG_%s=%s\n", sym->name, str);
+ if (out_h)
+ fprintf(out_h, "#define CONFIG_%s %s\n", sym->name, str);
+ break;
+ }
+ }
+
+ next:
+ if (menu->list) {
+ menu = menu->list;
+ continue;
+ }
+ if (menu->next)
+ menu = menu->next;
+ else while ((menu = menu->parent)) {
+ if (menu->next) {
+ menu = menu->next;
+ break;
+ }
+ }
+ }
+ fclose(out);
+ if (out_h) {
+ fclose(out_h);
+ rename(".tmpconfig.h", "include/lwk/autoconf.h");
+ }
+ if (!name || basename != conf_def_filename) {
+ if (!name)
+ name = conf_def_filename;
+ sprintf(tmpname, "%s.old", name);
+ rename(name, tmpname);
+ }
+ sprintf(tmpname, "%s%s", dirname, basename);
+ if (rename(newname, tmpname))
+ return 1;
+
+ sym_change_count = 0;
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define LKC_DIRECT_LINK
+#include "lkc.h"
+
+#define DEBUG_EXPR 0
+
+struct expr *expr_alloc_symbol(struct symbol *sym)
+{
+ struct expr *e = malloc(sizeof(*e));
+ memset(e, 0, sizeof(*e));
+ e->type = E_SYMBOL;
+ e->left.sym = sym;
+ return e;
+}
+
+struct expr *expr_alloc_one(enum expr_type type, struct expr *ce)
+{
+ struct expr *e = malloc(sizeof(*e));
+ memset(e, 0, sizeof(*e));
+ e->type = type;
+ e->left.expr = ce;
+ return e;
+}
+
+struct expr *expr_alloc_two(enum expr_type type, struct expr *e1, struct expr *e2)
+{
+ struct expr *e = malloc(sizeof(*e));
+ memset(e, 0, sizeof(*e));
+ e->type = type;
+ e->left.expr = e1;
+ e->right.expr = e2;
+ return e;
+}
+
+struct expr *expr_alloc_comp(enum expr_type type, struct symbol *s1, struct symbol *s2)
+{
+ struct expr *e = malloc(sizeof(*e));
+ memset(e, 0, sizeof(*e));
+ e->type = type;
+ e->left.sym = s1;
+ e->right.sym = s2;
+ return e;
+}
+
+struct expr *expr_alloc_and(struct expr *e1, struct expr *e2)
+{
+ if (!e1)
+ return e2;
+ return e2 ? expr_alloc_two(E_AND, e1, e2) : e1;
+}
+
+struct expr *expr_alloc_or(struct expr *e1, struct expr *e2)
+{
+ if (!e1)
+ return e2;
+ return e2 ? expr_alloc_two(E_OR, e1, e2) : e1;
+}
+
+struct expr *expr_copy(struct expr *org)
+{
+ struct expr *e;
+
+ if (!org)
+ return NULL;
+
+ e = malloc(sizeof(*org));
+ memcpy(e, org, sizeof(*org));
+ switch (org->type) {
+ case E_SYMBOL:
+ e->left = org->left;
+ break;
+ case E_NOT:
+ e->left.expr = expr_copy(org->left.expr);
+ break;
+ case E_EQUAL:
+ case E_UNEQUAL:
+ e->left.sym = org->left.sym;
+ e->right.sym = org->right.sym;
+ break;
+ case E_AND:
+ case E_OR:
+ case E_CHOICE:
+ e->left.expr = expr_copy(org->left.expr);
+ e->right.expr = expr_copy(org->right.expr);
+ break;
+ default:
+ printf("can't copy type %d\n", e->type);
+ free(e);
+ e = NULL;
+ break;
+ }
+
+ return e;
+}
+
+void expr_free(struct expr *e)
+{
+ if (!e)
+ return;
+
+ switch (e->type) {
+ case E_SYMBOL:
+ break;
+ case E_NOT:
+ expr_free(e->left.expr);
+ return;
+ case E_EQUAL:
+ case E_UNEQUAL:
+ break;
+ case E_OR:
+ case E_AND:
+ expr_free(e->left.expr);
+ expr_free(e->right.expr);
+ break;
+ default:
+ printf("how to free type %d?\n", e->type);
+ break;
+ }
+ free(e);
+}
+
+static int trans_count;
+
+#define e1 (*ep1)
+#define e2 (*ep2)
+
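+/* If the same subexpression appears on both sides of an OR (or AND), replace
+ * both copies with the neutral element ('n' for OR, 'y' for AND) so that only
+ * the differing parts remain; trans_count records that a change was made. */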
+static void __expr_eliminate_eq(enum expr_type type, struct expr **ep1, struct expr **ep2)
+{
+ if (e1->type == type) {
+ __expr_eliminate_eq(type, &e1->left.expr, &e2);
+ __expr_eliminate_eq(type, &e1->right.expr, &e2);
+ return;
+ }
+ if (e2->type == type) {
+ __expr_eliminate_eq(type, &e1, &e2->left.expr);
+ __expr_eliminate_eq(type, &e1, &e2->right.expr);
+ return;
+ }
+ if (e1->type == E_SYMBOL && e2->type == E_SYMBOL &&
+ e1->left.sym == e2->left.sym && (e1->left.sym->flags & (SYMBOL_YES|SYMBOL_NO)))
+ return;
+ if (!expr_eq(e1, e2))
+ return;
+ trans_count++;
+ expr_free(e1); expr_free(e2);
+ switch (type) {
+ case E_OR:
+ e1 = expr_alloc_symbol(&symbol_no);
+ e2 = expr_alloc_symbol(&symbol_no);
+ break;
+ case E_AND:
+ e1 = expr_alloc_symbol(&symbol_yes);
+ e2 = expr_alloc_symbol(&symbol_yes);
+ break;
+ default:
+ ;
+ }
+}
+
+void expr_eliminate_eq(struct expr **ep1, struct expr **ep2)
+{
+ if (!e1 || !e2)
+ return;
+ switch (e1->type) {
+ case E_OR:
+ case E_AND:
+ __expr_eliminate_eq(e1->type, ep1, ep2);
+ default:
+ ;
+ }
+ if (e1->type != e2->type) switch (e2->type) {
+ case E_OR:
+ case E_AND:
+ __expr_eliminate_eq(e2->type, ep1, ep2);
+ default:
+ ;
+ }
+ e1 = expr_eliminate_yn(e1);
+ e2 = expr_eliminate_yn(e2);
+}
+
+#undef e1
+#undef e2
+
+int expr_eq(struct expr *e1, struct expr *e2)
+{
+ int res, old_count;
+
+ if (e1->type != e2->type)
+ return 0;
+ switch (e1->type) {
+ case E_EQUAL:
+ case E_UNEQUAL:
+ return e1->left.sym == e2->left.sym && e1->right.sym == e2->right.sym;
+ case E_SYMBOL:
+ return e1->left.sym == e2->left.sym;
+ case E_NOT:
+ return expr_eq(e1->left.expr, e2->left.expr);
+ case E_AND:
+ case E_OR:
+ e1 = expr_copy(e1);
+ e2 = expr_copy(e2);
+ old_count = trans_count;
+ expr_eliminate_eq(&e1, &e2);
+ res = (e1->type == E_SYMBOL && e2->type == E_SYMBOL &&
+ e1->left.sym == e2->left.sym);
+ expr_free(e1);
+ expr_free(e2);
+ trans_count = old_count;
+ return res;
+ case E_CHOICE:
+ case E_RANGE:
+ case E_NONE:
+ /* panic */;
+ }
+
+ if (DEBUG_EXPR) {
+ expr_fprint(e1, stdout);
+ printf(" = ");
+ expr_fprint(e2, stdout);
+ printf(" ?\n");
+ }
+
+ return 0;
+}
+
+struct expr *expr_eliminate_yn(struct expr *e)
+{
+ struct expr *tmp;
+
+ if (e) switch (e->type) {
+ case E_AND:
+ e->left.expr = expr_eliminate_yn(e->left.expr);
+ e->right.expr = expr_eliminate_yn(e->right.expr);
+ if (e->left.expr->type == E_SYMBOL) {
+ if (e->left.expr->left.sym == &symbol_no) {
+ expr_free(e->left.expr);
+ expr_free(e->right.expr);
+ e->type = E_SYMBOL;
+ e->left.sym = &symbol_no;
+ e->right.expr = NULL;
+ return e;
+ } else if (e->left.expr->left.sym == &symbol_yes) {
+ free(e->left.expr);
+ tmp = e->right.expr;
+ *e = *(e->right.expr);
+ free(tmp);
+ return e;
+ }
+ }
+ if (e->right.expr->type == E_SYMBOL) {
+ if (e->right.expr->left.sym == &symbol_no) {
+ expr_free(e->left.expr);
+ expr_free(e->right.expr);
+ e->type = E_SYMBOL;
+ e->left.sym = &symbol_no;
+ e->right.expr = NULL;
+ return e;
+ } else if (e->right.expr->left.sym == &symbol_yes) {
+ free(e->right.expr);
+ tmp = e->left.expr;
+ *e = *(e->left.expr);
+ free(tmp);
+ return e;
+ }
+ }
+ break;
+ case E_OR:
+ e->left.expr = expr_eliminate_yn(e->left.expr);
+ e->right.expr = expr_eliminate_yn(e->right.expr);
+ if (e->left.expr->type == E_SYMBOL) {
+ if (e->left.expr->left.sym == &symbol_no) {
+ free(e->left.expr);
+ tmp = e->right.expr;
+ *e = *(e->right.expr);
+ free(tmp);
+ return e;
+ } else if (e->left.expr->left.sym == &symbol_yes) {
+ expr_free(e->left.expr);
+ expr_free(e->right.expr);
+ e->type = E_SYMBOL;
+ e->left.sym = &symbol_yes;
+ e->right.expr = NULL;
+ return e;
+ }
+ }
+ if (e->right.expr->type == E_SYMBOL) {
+ if (e->right.expr->left.sym == &symbol_no) {
+ free(e->right.expr);
+ tmp = e->left.expr;
+ *e = *(e->left.expr);
+ free(tmp);
+ return e;
+ } else if (e->right.expr->left.sym == &symbol_yes) {
+ expr_free(e->left.expr);
+ expr_free(e->right.expr);
+ e->type = E_SYMBOL;
+ e->left.sym = &symbol_yes;
+ e->right.expr = NULL;
+ return e;
+ }
+ }
+ break;
+ default:
+ ;
+ }
+ return e;
+}
+
+/*
+ * bool FOO!=n => FOO
+ */
+struct expr *expr_trans_bool(struct expr *e)
+{
+ if (!e)
+ return NULL;
+ switch (e->type) {
+ case E_AND:
+ case E_OR:
+ case E_NOT:
+ e->left.expr = expr_trans_bool(e->left.expr);
+ e->right.expr = expr_trans_bool(e->right.expr);
+ break;
+ case E_UNEQUAL:
+ // FOO!=n -> FOO
+ if (e->left.sym->type == S_TRISTATE) {
+ if (e->right.sym == &symbol_no) {
+ e->type = E_SYMBOL;
+ e->right.sym = NULL;
+ }
+ }
+ break;
+ default:
+ ;
+ }
+ return e;
+}
+
+/*
+ * e1 || e2 -> ?
+ */
+struct expr *expr_join_or(struct expr *e1, struct expr *e2)
+{
+ struct expr *tmp;
+ struct symbol *sym1, *sym2;
+
+ if (expr_eq(e1, e2))
+ return expr_copy(e1);
+ if (e1->type != E_EQUAL && e1->type != E_UNEQUAL && e1->type != E_SYMBOL && e1->type != E_NOT)
+ return NULL;
+ if (e2->type != E_EQUAL && e2->type != E_UNEQUAL && e2->type != E_SYMBOL && e2->type != E_NOT)
+ return NULL;
+ if (e1->type == E_NOT) {
+ tmp = e1->left.expr;
+ if (tmp->type != E_EQUAL && tmp->type != E_UNEQUAL && tmp->type != E_SYMBOL)
+ return NULL;
+ sym1 = tmp->left.sym;
+ } else
+ sym1 = e1->left.sym;
+ if (e2->type == E_NOT) {
+ if (e2->left.expr->type != E_SYMBOL)
+ return NULL;
+ sym2 = e2->left.expr->left.sym;
+ } else
+ sym2 = e2->left.sym;
+ if (sym1 != sym2)
+ return NULL;
+ if (sym1->type != S_BOOLEAN && sym1->type != S_TRISTATE)
+ return NULL;
+ if (sym1->type == S_TRISTATE) {
+ if (e1->type == E_EQUAL && e2->type == E_EQUAL &&
+ ((e1->right.sym == &symbol_yes && e2->right.sym == &symbol_mod) ||
+ (e1->right.sym == &symbol_mod && e2->right.sym == &symbol_yes))) {
+ // (a='y') || (a='m') -> (a!='n')
+ return expr_alloc_comp(E_UNEQUAL, sym1, &symbol_no);
+ }
+ if (e1->type == E_EQUAL && e2->type == E_EQUAL &&
+ ((e1->right.sym == &symbol_yes && e2->right.sym == &symbol_no) ||
+ (e1->right.sym == &symbol_no && e2->right.sym == &symbol_yes))) {
+ // (a='y') || (a='n') -> (a!='m')
+ return expr_alloc_comp(E_UNEQUAL, sym1, &symbol_mod);
+ }
+ if (e1->type == E_EQUAL && e2->type == E_EQUAL &&
+ ((e1->right.sym == &symbol_mod && e2->right.sym == &symbol_no) ||
+ (e1->right.sym == &symbol_no && e2->right.sym == &symbol_mod))) {
+ // (a='m') || (a='n') -> (a!='y')
+ return expr_alloc_comp(E_UNEQUAL, sym1, &symbol_yes);
+ }
+ }
+ if (sym1->type == S_BOOLEAN && sym1 == sym2) {
+ if ((e1->type == E_NOT && e1->left.expr->type == E_SYMBOL && e2->type == E_SYMBOL) ||
+ (e2->type == E_NOT && e2->left.expr->type == E_SYMBOL && e1->type == E_SYMBOL))
+ return expr_alloc_symbol(&symbol_yes);
+ }
+
+ if (DEBUG_EXPR) {
+ printf("optimize (");
+ expr_fprint(e1, stdout);
+ printf(") || (");
+ expr_fprint(e2, stdout);
+ printf(")?\n");
+ }
+ return NULL;
+}
+
+struct expr *expr_join_and(struct expr *e1, struct expr *e2)
+{
+ struct expr *tmp;
+ struct symbol *sym1, *sym2;
+
+ if (expr_eq(e1, e2))
+ return expr_copy(e1);
+ if (e1->type != E_EQUAL && e1->type != E_UNEQUAL && e1->type != E_SYMBOL && e1->type != E_NOT)
+ return NULL;
+ if (e2->type != E_EQUAL && e2->type != E_UNEQUAL && e2->type != E_SYMBOL && e2->type != E_NOT)
+ return NULL;
+ if (e1->type == E_NOT) {
+ tmp = e1->left.expr;
+ if (tmp->type != E_EQUAL && tmp->type != E_UNEQUAL && tmp->type != E_SYMBOL)
+ return NULL;
+ sym1 = tmp->left.sym;
+ } else
+ sym1 = e1->left.sym;
+ if (e2->type == E_NOT) {
+ if (e2->left.expr->type != E_SYMBOL)
+ return NULL;
+ sym2 = e2->left.expr->left.sym;
+ } else
+ sym2 = e2->left.sym;
+ if (sym1 != sym2)
+ return NULL;
+ if (sym1->type != S_BOOLEAN && sym1->type != S_TRISTATE)
+ return NULL;
+
+ if ((e1->type == E_SYMBOL && e2->type == E_EQUAL && e2->right.sym == &symbol_yes) ||
+ (e2->type == E_SYMBOL && e1->type == E_EQUAL && e1->right.sym == &symbol_yes))
+ // (a) && (a='y') -> (a='y')
+ return expr_alloc_comp(E_EQUAL, sym1, &symbol_yes);
+
+ if ((e1->type == E_SYMBOL && e2->type == E_UNEQUAL && e2->right.sym == &symbol_no) ||
+ (e2->type == E_SYMBOL && e1->type == E_UNEQUAL && e1->right.sym == &symbol_no))
+ // (a) && (a!='n') -> (a)
+ return expr_alloc_symbol(sym1);
+
+ if ((e1->type == E_SYMBOL && e2->type == E_UNEQUAL && e2->right.sym == &symbol_mod) ||
+ (e2->type == E_SYMBOL && e1->type == E_UNEQUAL && e1->right.sym == &symbol_mod))
+ // (a) && (a!='m') -> (a='y')
+ return expr_alloc_comp(E_EQUAL, sym1, &symbol_yes);
+
+ if (sym1->type == S_TRISTATE) {
+ if (e1->type == E_EQUAL && e2->type == E_UNEQUAL) {
+ // (a='b') && (a!='c') -> 'b'='c' ? 'n' : a='b'
+ sym2 = e1->right.sym;
+ if ((e2->right.sym->flags & SYMBOL_CONST) && (sym2->flags & SYMBOL_CONST))
+ return sym2 != e2->right.sym ? expr_alloc_comp(E_EQUAL, sym1, sym2)
+ : expr_alloc_symbol(&symbol_no);
+ }
+ if (e1->type == E_UNEQUAL && e2->type == E_EQUAL) {
+ // (a='b') && (a!='c') -> 'b'='c' ? 'n' : a='b'
+ sym2 = e2->right.sym;
+ if ((e1->right.sym->flags & SYMBOL_CONST) && (sym2->flags & SYMBOL_CONST))
+ return sym2 != e1->right.sym ? expr_alloc_comp(E_EQUAL, sym1, sym2)
+ : expr_alloc_symbol(&symbol_no);
+ }
+ if (e1->type == E_UNEQUAL && e2->type == E_UNEQUAL &&
+ ((e1->right.sym == &symbol_yes && e2->right.sym == &symbol_no) ||
+ (e1->right.sym == &symbol_no && e2->right.sym == &symbol_yes)))
+ // (a!='y') && (a!='n') -> (a='m')
+ return expr_alloc_comp(E_EQUAL, sym1, &symbol_mod);
+
+ if (e1->type == E_UNEQUAL && e2->type == E_UNEQUAL &&
+ ((e1->right.sym == &symbol_yes && e2->right.sym == &symbol_mod) ||
+ (e1->right.sym == &symbol_mod && e2->right.sym == &symbol_yes)))
+ // (a!='y') && (a!='m') -> (a='n')
+ return expr_alloc_comp(E_EQUAL, sym1, &symbol_no);
+
+ if (e1->type == E_UNEQUAL && e2->type == E_UNEQUAL &&
+ ((e1->right.sym == &symbol_mod && e2->right.sym == &symbol_no) ||
+ (e1->right.sym == &symbol_no && e2->right.sym == &symbol_mod)))
+ // (a!='m') && (a!='n') -> (a='m')
+ return expr_alloc_comp(E_EQUAL, sym1, &symbol_yes);
+
+ if ((e1->type == E_SYMBOL && e2->type == E_EQUAL && e2->right.sym == &symbol_mod) ||
+ (e2->type == E_SYMBOL && e1->type == E_EQUAL && e1->right.sym == &symbol_mod) ||
+ (e1->type == E_SYMBOL && e2->type == E_UNEQUAL && e2->right.sym == &symbol_yes) ||
+ (e2->type == E_SYMBOL && e1->type == E_UNEQUAL && e1->right.sym == &symbol_yes))
+ return NULL;
+ }
+
+ if (DEBUG_EXPR) {
+ printf("optimize (");
+ expr_fprint(e1, stdout);
+ printf(") && (");
+ expr_fprint(e2, stdout);
+ printf(")?\n");
+ }
+ return NULL;
+}
+
+static void expr_eliminate_dups1(enum expr_type type, struct expr **ep1, struct expr **ep2)
+{
+#define e1 (*ep1)
+#define e2 (*ep2)
+ struct expr *tmp;
+
+ if (e1->type == type) {
+ expr_eliminate_dups1(type, &e1->left.expr, &e2);
+ expr_eliminate_dups1(type, &e1->right.expr, &e2);
+ return;
+ }
+ if (e2->type == type) {
+ expr_eliminate_dups1(type, &e1, &e2->left.expr);
+ expr_eliminate_dups1(type, &e1, &e2->right.expr);
+ return;
+ }
+ if (e1 == e2)
+ return;
+
+ switch (e1->type) {
+ case E_OR: case E_AND:
+ expr_eliminate_dups1(e1->type, &e1, &e1);
+ default:
+ ;
+ }
+
+ switch (type) {
+ case E_OR:
+ tmp = expr_join_or(e1, e2);
+ if (tmp) {
+ expr_free(e1); expr_free(e2);
+ e1 = expr_alloc_symbol(&symbol_no);
+ e2 = tmp;
+ trans_count++;
+ }
+ break;
+ case E_AND:
+ tmp = expr_join_and(e1, e2);
+ if (tmp) {
+ expr_free(e1); expr_free(e2);
+ e1 = expr_alloc_symbol(&symbol_yes);
+ e2 = tmp;
+ trans_count++;
+ }
+ break;
+ default:
+ ;
+ }
+#undef e1
+#undef e2
+}
+
+static void expr_eliminate_dups2(enum expr_type type, struct expr **ep1, struct expr **ep2)
+{
+#define e1 (*ep1)
+#define e2 (*ep2)
+ struct expr *tmp, *tmp1, *tmp2;
+
+ if (e1->type == type) {
+ expr_eliminate_dups2(type, &e1->left.expr, &e2);
+ expr_eliminate_dups2(type, &e1->right.expr, &e2);
+ return;
+ }
+ if (e2->type == type) {
+ expr_eliminate_dups2(type, &e1, &e2->left.expr);
+ expr_eliminate_dups2(type, &e1, &e2->right.expr);
+ }
+ if (e1 == e2)
+ return;
+
+ switch (e1->type) {
+ case E_OR:
+ expr_eliminate_dups2(e1->type, &e1, &e1);
+ // (FOO || BAR) && (!FOO && !BAR) -> n
+ tmp1 = expr_transform(expr_alloc_one(E_NOT, expr_copy(e1)));
+ tmp2 = expr_copy(e2);
+ tmp = expr_extract_eq_and(&tmp1, &tmp2);
+ if (expr_is_yes(tmp1)) {
+ expr_free(e1);
+ e1 = expr_alloc_symbol(&symbol_no);
+ trans_count++;
+ }
+ expr_free(tmp2);
+ expr_free(tmp1);
+ expr_free(tmp);
+ break;
+ case E_AND:
+ expr_eliminate_dups2(e1->type, &e1, &e1);
+ // (FOO && BAR) || (!FOO || !BAR) -> y
+ tmp1 = expr_transform(expr_alloc_one(E_NOT, expr_copy(e1)));
+ tmp2 = expr_copy(e2);
+ tmp = expr_extract_eq_or(&tmp1, &tmp2);
+ if (expr_is_no(tmp1)) {
+ expr_free(e1);
+ e1 = expr_alloc_symbol(&symbol_yes);
+ trans_count++;
+ }
+ expr_free(tmp2);
+ expr_free(tmp1);
+ expr_free(tmp);
+ break;
+ default:
+ ;
+ }
+#undef e1
+#undef e2
+}
+
+struct expr *expr_eliminate_dups(struct expr *e)
+{
+ int oldcount;
+ if (!e)
+ return e;
+
+ oldcount = trans_count;
+ while (1) {
+ trans_count = 0;
+ switch (e->type) {
+ case E_OR: case E_AND:
+ expr_eliminate_dups1(e->type, &e, &e);
+ expr_eliminate_dups2(e->type, &e, &e);
+ default:
+ ;
+ }
+ if (!trans_count)
+ break;
+ e = expr_eliminate_yn(e);
+ }
+ trans_count = oldcount;
+ return e;
+}
+
+struct expr *expr_transform(struct expr *e)
+{
+ struct expr *tmp;
+
+ if (!e)
+ return NULL;
+ switch (e->type) {
+ case E_EQUAL:
+ case E_UNEQUAL:
+ case E_SYMBOL:
+ case E_CHOICE:
+ break;
+ default:
+ e->left.expr = expr_transform(e->left.expr);
+ e->right.expr = expr_transform(e->right.expr);
+ }
+
+ switch (e->type) {
+ case E_EQUAL:
+ if (e->left.sym->type != S_BOOLEAN)
+ break;
+ if (e->right.sym == &symbol_no) {
+ e->type = E_NOT;
+ e->left.expr = expr_alloc_symbol(e->left.sym);
+ e->right.sym = NULL;
+ break;
+ }
+ if (e->right.sym == &symbol_mod) {
+ printf("boolean symbol %s tested for 'm'? test forced to 'n'\n", e->left.sym->name);
+ e->type = E_SYMBOL;
+ e->left.sym = &symbol_no;
+ e->right.sym = NULL;
+ break;
+ }
+ if (e->right.sym == &symbol_yes) {
+ e->type = E_SYMBOL;
+ e->right.sym = NULL;
+ break;
+ }
+ break;
+ case E_UNEQUAL:
+ if (e->left.sym->type != S_BOOLEAN)
+ break;
+ if (e->right.sym == &symbol_no) {
+ e->type = E_SYMBOL;
+ e->right.sym = NULL;
+ break;
+ }
+ if (e->right.sym == &symbol_mod) {
+ printf("boolean symbol %s tested for 'm'? test forced to 'y'\n", e->left.sym->name);
+ e->type = E_SYMBOL;
+ e->left.sym = &symbol_yes;
+ e->right.sym = NULL;
+ break;
+ }
+ if (e->right.sym == &symbol_yes) {
+ e->type = E_NOT;
+ e->left.expr = expr_alloc_symbol(e->left.sym);
+ e->right.sym = NULL;
+ break;
+ }
+ break;
+ case E_NOT:
+ switch (e->left.expr->type) {
+ case E_NOT:
+ // !!a -> a
+ tmp = e->left.expr->left.expr;
+ free(e->left.expr);
+ free(e);
+ e = tmp;
+ e = expr_transform(e);
+ break;
+ case E_EQUAL:
+ case E_UNEQUAL:
+ // !a='x' -> a!='x'
+ tmp = e->left.expr;
+ free(e);
+ e = tmp;
+ e->type = e->type == E_EQUAL ? E_UNEQUAL : E_EQUAL;
+ break;
+ case E_OR:
+ // !(a || b) -> !a && !b
+ tmp = e->left.expr;
+ e->type = E_AND;
+ e->right.expr = expr_alloc_one(E_NOT, tmp->right.expr);
+ tmp->type = E_NOT;
+ tmp->right.expr = NULL;
+ e = expr_transform(e);
+ break;
+ case E_AND:
+ // !(a && b) -> !a || !b
+ tmp = e->left.expr;
+ e->type = E_OR;
+ e->right.expr = expr_alloc_one(E_NOT, tmp->right.expr);
+ tmp->type = E_NOT;
+ tmp->right.expr = NULL;
+ e = expr_transform(e);
+ break;
+ case E_SYMBOL:
+ if (e->left.expr->left.sym == &symbol_yes) {
+ // !'y' -> 'n'
+ tmp = e->left.expr;
+ free(e);
+ e = tmp;
+ e->type = E_SYMBOL;
+ e->left.sym = &symbol_no;
+ break;
+ }
+ if (e->left.expr->left.sym == &symbol_mod) {
+ // !'m' -> 'm'
+ tmp = e->left.expr;
+ free(e);
+ e = tmp;
+ e->type = E_SYMBOL;
+ e->left.sym = &symbol_mod;
+ break;
+ }
+ if (e->left.expr->left.sym == &symbol_no) {
+ // !'n' -> 'y'
+ tmp = e->left.expr;
+ free(e);
+ e = tmp;
+ e->type = E_SYMBOL;
+ e->left.sym = &symbol_yes;
+ break;
+ }
+ break;
+ default:
+ ;
+ }
+ break;
+ default:
+ ;
+ }
+ return e;
+}
+
+int expr_contains_symbol(struct expr *dep, struct symbol *sym)
+{
+ if (!dep)
+ return 0;
+
+ switch (dep->type) {
+ case E_AND:
+ case E_OR:
+ return expr_contains_symbol(dep->left.expr, sym) ||
+ expr_contains_symbol(dep->right.expr, sym);
+ case E_SYMBOL:
+ return dep->left.sym == sym;
+ case E_EQUAL:
+ case E_UNEQUAL:
+ return dep->left.sym == sym ||
+ dep->right.sym == sym;
+ case E_NOT:
+ return expr_contains_symbol(dep->left.expr, sym);
+ default:
+ ;
+ }
+ return 0;
+}
+
+bool expr_depends_symbol(struct expr *dep, struct symbol *sym)
+{
+ if (!dep)
+ return false;
+
+ switch (dep->type) {
+ case E_AND:
+ return expr_depends_symbol(dep->left.expr, sym) ||
+ expr_depends_symbol(dep->right.expr, sym);
+ case E_SYMBOL:
+ return dep->left.sym == sym;
+ case E_EQUAL:
+ if (dep->left.sym == sym) {
+ if (dep->right.sym == &symbol_yes || dep->right.sym == &symbol_mod)
+ return true;
+ }
+ break;
+ case E_UNEQUAL:
+ if (dep->left.sym == sym) {
+ if (dep->right.sym == &symbol_no)
+ return true;
+ }
+ break;
+ default:
+ ;
+ }
+ return false;
+}
+
+struct expr *expr_extract_eq_and(struct expr **ep1, struct expr **ep2)
+{
+ struct expr *tmp = NULL;
+ expr_extract_eq(E_AND, &tmp, ep1, ep2);
+ if (tmp) {
+ *ep1 = expr_eliminate_yn(*ep1);
+ *ep2 = expr_eliminate_yn(*ep2);
+ }
+ return tmp;
+}
+
+struct expr *expr_extract_eq_or(struct expr **ep1, struct expr **ep2)
+{
+ struct expr *tmp = NULL;
+ expr_extract_eq(E_OR, &tmp, ep1, ep2);
+ if (tmp) {
+ *ep1 = expr_eliminate_yn(*ep1);
+ *ep2 = expr_eliminate_yn(*ep2);
+ }
+ return tmp;
+}
+
+void expr_extract_eq(enum expr_type type, struct expr **ep, struct expr **ep1, struct expr **ep2)
+{
+#define e1 (*ep1)
+#define e2 (*ep2)
+ if (e1->type == type) {
+ expr_extract_eq(type, ep, &e1->left.expr, &e2);
+ expr_extract_eq(type, ep, &e1->right.expr, &e2);
+ return;
+ }
+ if (e2->type == type) {
+ expr_extract_eq(type, ep, ep1, &e2->left.expr);
+ expr_extract_eq(type, ep, ep1, &e2->right.expr);
+ return;
+ }
+ if (expr_eq(e1, e2)) {
+ *ep = *ep ? expr_alloc_two(type, *ep, e1) : e1;
+ expr_free(e2);
+ if (type == E_AND) {
+ e1 = expr_alloc_symbol(&symbol_yes);
+ e2 = expr_alloc_symbol(&symbol_yes);
+ } else if (type == E_OR) {
+ e1 = expr_alloc_symbol(&symbol_no);
+ e2 = expr_alloc_symbol(&symbol_no);
+ }
+ }
+#undef e1
+#undef e2
+}
+
+struct expr *expr_trans_compare(struct expr *e, enum expr_type type, struct symbol *sym)
+{
+ struct expr *e1, *e2;
+
+ if (!e) {
+ e = expr_alloc_symbol(sym);
+ if (type == E_UNEQUAL)
+ e = expr_alloc_one(E_NOT, e);
+ return e;
+ }
+ switch (e->type) {
+ case E_AND:
+ e1 = expr_trans_compare(e->left.expr, E_EQUAL, sym);
+ e2 = expr_trans_compare(e->right.expr, E_EQUAL, sym);
+ if (sym == &symbol_yes)
+ e = expr_alloc_two(E_AND, e1, e2);
+ if (sym == &symbol_no)
+ e = expr_alloc_two(E_OR, e1, e2);
+ if (type == E_UNEQUAL)
+ e = expr_alloc_one(E_NOT, e);
+ return e;
+ case E_OR:
+ e1 = expr_trans_compare(e->left.expr, E_EQUAL, sym);
+ e2 = expr_trans_compare(e->right.expr, E_EQUAL, sym);
+ if (sym == &symbol_yes)
+ e = expr_alloc_two(E_OR, e1, e2);
+ if (sym == &symbol_no)
+ e = expr_alloc_two(E_AND, e1, e2);
+ if (type == E_UNEQUAL)
+ e = expr_alloc_one(E_NOT, e);
+ return e;
+ case E_NOT:
+ return expr_trans_compare(e->left.expr, type == E_EQUAL ? E_UNEQUAL : E_EQUAL, sym);
+ case E_UNEQUAL:
+ case E_EQUAL:
+ if (type == E_EQUAL) {
+ if (sym == &symbol_yes)
+ return expr_copy(e);
+ if (sym == &symbol_mod)
+ return expr_alloc_symbol(&symbol_no);
+ if (sym == &symbol_no)
+ return expr_alloc_one(E_NOT, expr_copy(e));
+ } else {
+ if (sym == &symbol_yes)
+ return expr_alloc_one(E_NOT, expr_copy(e));
+ if (sym == &symbol_mod)
+ return expr_alloc_symbol(&symbol_yes);
+ if (sym == &symbol_no)
+ return expr_copy(e);
+ }
+ break;
+ case E_SYMBOL:
+ return expr_alloc_comp(type, e->left.sym, sym);
+ case E_CHOICE:
+ case E_RANGE:
+ case E_NONE:
+ /* panic */;
+ }
+ return NULL;
+}
+
+tristate expr_calc_value(struct expr *e)
+{
+ tristate val1, val2;
+ const char *str1, *str2;
+
+ if (!e)
+ return yes;
+
+ switch (e->type) {
+ case E_SYMBOL:
+ sym_calc_value(e->left.sym);
+ return e->left.sym->curr.tri;
+ case E_AND:
+ val1 = expr_calc_value(e->left.expr);
+ val2 = expr_calc_value(e->right.expr);
+ return E_AND(val1, val2);
+ case E_OR:
+ val1 = expr_calc_value(e->left.expr);
+ val2 = expr_calc_value(e->right.expr);
+ return E_OR(val1, val2);
+ case E_NOT:
+ val1 = expr_calc_value(e->left.expr);
+ return E_NOT(val1);
+ case E_EQUAL:
+ sym_calc_value(e->left.sym);
+ sym_calc_value(e->right.sym);
+ str1 = sym_get_string_value(e->left.sym);
+ str2 = sym_get_string_value(e->right.sym);
+ return !strcmp(str1, str2) ? yes : no;
+ case E_UNEQUAL:
+ sym_calc_value(e->left.sym);
+ sym_calc_value(e->right.sym);
+ str1 = sym_get_string_value(e->left.sym);
+ str2 = sym_get_string_value(e->right.sym);
+ return !strcmp(str1, str2) ? no : yes;
+ default:
+ printf("expr_calc_value: %d?\n", e->type);
+ return no;
+ }
+}
+
+int expr_compare_type(enum expr_type t1, enum expr_type t2)
+{
+#if 0
+ return 1;
+#else
+ if (t1 == t2)
+ return 0;
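+ /* The cases below intentionally fall through: each operator also outranks
+  * every operator listed after it, and returning 1 means t1 binds more
+  * tightly than t2, which makes expr_print() parenthesize the subexpression. */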
+ switch (t1) {
+ case E_EQUAL:
+ case E_UNEQUAL:
+ if (t2 == E_NOT)
+ return 1;
+ case E_NOT:
+ if (t2 == E_AND)
+ return 1;
+ case E_AND:
+ if (t2 == E_OR)
+ return 1;
+ case E_OR:
+ if (t2 == E_CHOICE)
+ return 1;
+ case E_CHOICE:
+ if (t2 == 0)
+ return 1;
+ default:
+ return -1;
+ }
+ printf("[%dgt%d?]", t1, t2);
+ return 0;
+#endif
+}
+
+void expr_print(struct expr *e, void (*fn)(void *, const char *), void *data, int prevtoken)
+{
+ if (!e) {
+ fn(data, "y");
+ return;
+ }
+
+ if (expr_compare_type(prevtoken, e->type) > 0)
+ fn(data, "(");
+ switch (e->type) {
+ case E_SYMBOL:
+ if (e->left.sym->name)
+ fn(data, e->left.sym->name);
+ else
+ fn(data, "<choice>");
+ break;
+ case E_NOT:
+ fn(data, "!");
+ expr_print(e->left.expr, fn, data, E_NOT);
+ break;
+ case E_EQUAL:
+ fn(data, e->left.sym->name);
+ fn(data, "=");
+ fn(data, e->right.sym->name);
+ break;
+ case E_UNEQUAL:
+ fn(data, e->left.sym->name);
+ fn(data, "!=");
+ fn(data, e->right.sym->name);
+ break;
+ case E_OR:
+ expr_print(e->left.expr, fn, data, E_OR);
+ fn(data, " || ");
+ expr_print(e->right.expr, fn, data, E_OR);
+ break;
+ case E_AND:
+ expr_print(e->left.expr, fn, data, E_AND);
+ fn(data, " && ");
+ expr_print(e->right.expr, fn, data, E_AND);
+ break;
+ case E_CHOICE:
+ fn(data, e->right.sym->name);
+ if (e->left.expr) {
+ fn(data, " ^ ");
+ expr_print(e->left.expr, fn, data, E_CHOICE);
+ }
+ break;
+ case E_RANGE:
+ fn(data, "[");
+ fn(data, e->left.sym->name);
+ fn(data, " ");
+ fn(data, e->right.sym->name);
+ fn(data, "]");
+ break;
+ default:
+ {
+ char buf[32];
+ sprintf(buf, "<unknown type %d>", e->type);
+ fn(data, buf);
+ break;
+ }
+ }
+ if (expr_compare_type(prevtoken, e->type) > 0)
+ fn(data, ")");
+}
+
+static void expr_print_file_helper(void *data, const char *str)
+{
+ fwrite(str, strlen(str), 1, data);
+}
+
+void expr_fprint(struct expr *e, FILE *out)
+{
+ expr_print(e, expr_print_file_helper, out, E_NONE);
+}
+
+static void expr_print_gstr_helper(void *data, const char *str)
+{
+ str_append((struct gstr*)data, str);
+}
+
+void expr_gstr_print(struct expr *e, struct gstr *gs)
+{
+ expr_print(e, expr_print_gstr_helper, gs, E_NONE);
+}
--- /dev/null
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#ifndef EXPR_H
+#define EXPR_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#ifndef __cplusplus
+#include <stdbool.h>
+#endif
+
+struct file {
+ struct file *next;
+ struct file *parent;
+ char *name;
+ int lineno;
+ int flags;
+};
+
+#define FILE_BUSY 0x0001
+#define FILE_SCANNED 0x0002
+#define FILE_PRINTED 0x0004
+
+typedef enum tristate {
+ no, mod, yes
+} tristate;
+
+enum expr_type {
+ E_NONE, E_OR, E_AND, E_NOT, E_EQUAL, E_UNEQUAL, E_CHOICE, E_SYMBOL, E_RANGE
+};
+
+union expr_data {
+ struct expr *expr;
+ struct symbol *sym;
+};
+
+struct expr {
+ enum expr_type type;
+ union expr_data left, right;
+};
+
+#define E_OR(dep1, dep2) (((dep1)>(dep2))?(dep1):(dep2))
+#define E_AND(dep1, dep2) (((dep1)<(dep2))?(dep1):(dep2))
+#define E_NOT(dep) (2-(dep))
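+/* tristate values are ordered no (0) < mod (1) < yes (2), so OR is the
+ * maximum, AND the minimum, and NOT mirrors a value around mod, e.g.
+ * E_OR(no, mod) == mod, E_AND(yes, mod) == mod, E_NOT(yes) == no. */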
+
+struct expr_value {
+ struct expr *expr;
+ tristate tri;
+};
+
+struct symbol_value {
+ void *val;
+ tristate tri;
+};
+
+enum symbol_type {
+ S_UNKNOWN, S_BOOLEAN, S_TRISTATE, S_INT, S_HEX, S_STRING, S_OTHER
+};
+
+struct symbol {
+ struct symbol *next;
+ char *name;
+ char *help;
+ enum symbol_type type;
+ struct symbol_value curr, user;
+ tristate visible;
+ int flags;
+ struct property *prop;
+ struct expr *dep, *dep2;
+ struct expr_value rev_dep;
+};
+
+#define for_all_symbols(i, sym) for (i = 0; i < 257; i++) for (sym = symbol_hash[i]; sym; sym = sym->next) if (sym->type != S_OTHER)
+
+#define SYMBOL_YES 0x0001
+#define SYMBOL_MOD 0x0002
+#define SYMBOL_NO 0x0004
+#define SYMBOL_CONST 0x0007
+#define SYMBOL_CHECK 0x0008
+#define SYMBOL_CHOICE 0x0010
+#define SYMBOL_CHOICEVAL 0x0020
+#define SYMBOL_PRINTED 0x0040
+#define SYMBOL_VALID 0x0080
+#define SYMBOL_OPTIONAL 0x0100
+#define SYMBOL_WRITE 0x0200
+#define SYMBOL_CHANGED 0x0400
+#define SYMBOL_NEW 0x0800
+#define SYMBOL_AUTO 0x1000
+#define SYMBOL_CHECKED 0x2000
+#define SYMBOL_WARNED 0x8000
+
+#define SYMBOL_MAXLENGTH 256
+#define SYMBOL_HASHSIZE 257
+#define SYMBOL_HASHMASK 0xff
+
+enum prop_type {
+ P_UNKNOWN, P_PROMPT, P_COMMENT, P_MENU, P_DEFAULT, P_CHOICE, P_SELECT, P_RANGE
+};
+
+struct property {
+ struct property *next;
+ struct symbol *sym;
+ enum prop_type type;
+ const char *text;
+ struct expr_value visible;
+ struct expr *expr;
+ struct menu *menu;
+ struct file *file;
+ int lineno;
+};
+
+#define for_all_properties(sym, st, tok) \
+ for (st = sym->prop; st; st = st->next) \
+ if (st->type == (tok))
+#define for_all_defaults(sym, st) for_all_properties(sym, st, P_DEFAULT)
+#define for_all_choices(sym, st) for_all_properties(sym, st, P_CHOICE)
+#define for_all_prompts(sym, st) \
+ for (st = sym->prop; st; st = st->next) \
+ if (st->text)
+
+struct menu {
+ struct menu *next;
+ struct menu *parent;
+ struct menu *list;
+ struct symbol *sym;
+ struct property *prompt;
+ struct expr *dep;
+ unsigned int flags;
+ //char *help;
+ struct file *file;
+ int lineno;
+ void *data;
+};
+
+#define MENU_CHANGED 0x0001
+#define MENU_ROOT 0x0002
+
+#ifndef SWIG
+
+extern struct file *file_list;
+extern struct file *current_file;
+struct file *lookup_file(const char *name);
+
+extern struct symbol symbol_yes, symbol_no, symbol_mod;
+extern struct symbol *modules_sym;
+extern int cdebug;
+struct expr *expr_alloc_symbol(struct symbol *sym);
+struct expr *expr_alloc_one(enum expr_type type, struct expr *ce);
+struct expr *expr_alloc_two(enum expr_type type, struct expr *e1, struct expr *e2);
+struct expr *expr_alloc_comp(enum expr_type type, struct symbol *s1, struct symbol *s2);
+struct expr *expr_alloc_and(struct expr *e1, struct expr *e2);
+struct expr *expr_alloc_or(struct expr *e1, struct expr *e2);
+struct expr *expr_copy(struct expr *org);
+void expr_free(struct expr *e);
+int expr_eq(struct expr *e1, struct expr *e2);
+void expr_eliminate_eq(struct expr **ep1, struct expr **ep2);
+tristate expr_calc_value(struct expr *e);
+struct expr *expr_eliminate_yn(struct expr *e);
+struct expr *expr_trans_bool(struct expr *e);
+struct expr *expr_eliminate_dups(struct expr *e);
+struct expr *expr_transform(struct expr *e);
+int expr_contains_symbol(struct expr *dep, struct symbol *sym);
+bool expr_depends_symbol(struct expr *dep, struct symbol *sym);
+struct expr *expr_extract_eq_and(struct expr **ep1, struct expr **ep2);
+struct expr *expr_extract_eq_or(struct expr **ep1, struct expr **ep2);
+void expr_extract_eq(enum expr_type type, struct expr **ep, struct expr **ep1, struct expr **ep2);
+struct expr *expr_trans_compare(struct expr *e, enum expr_type type, struct symbol *sym);
+
+void expr_fprint(struct expr *e, FILE *out);
+struct gstr; /* forward */
+void expr_gstr_print(struct expr *e, struct gstr *gs);
+
+static inline int expr_is_yes(struct expr *e)
+{
+ return !e || (e->type == E_SYMBOL && e->left.sym == &symbol_yes);
+}
+
+static inline int expr_is_no(struct expr *e)
+{
+ return e && (e->type == E_SYMBOL && e->left.sym == &symbol_no);
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EXPR_H */
--- /dev/null
+/* Hey EMACS -*- linux-c -*- */
+/*
+ *
+ * Copyright (C) 2002-2003 Romain Lievin <roms@tilp.info>
+ * Released under the terms of the GNU GPL v2.0.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "lkc.h"
+#include "images.c"
+
+#include <glade/glade.h>
+#include <gtk/gtk.h>
+#include <glib.h>
+#include <gdk/gdkkeysyms.h>
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <time.h>
+#include <stdlib.h>
+
+//#define DEBUG
+
+enum {
+ SINGLE_VIEW, SPLIT_VIEW, FULL_VIEW
+};
+
+static gint view_mode = FULL_VIEW;
+static gboolean show_name = TRUE;
+static gboolean show_range = TRUE;
+static gboolean show_value = TRUE;
+static gboolean show_all = FALSE;
+static gboolean show_debug = FALSE;
+static gboolean resizeable = FALSE;
+
+static gboolean config_changed = FALSE;
+
+static char nohelp_text[] =
+ N_("Sorry, no help available for this option yet.\n");
+
+GtkWidget *main_wnd = NULL;
+GtkWidget *tree1_w = NULL; // left frame
+GtkWidget *tree2_w = NULL; // right frame
+GtkWidget *text_w = NULL;
+GtkWidget *hpaned = NULL;
+GtkWidget *vpaned = NULL;
+GtkWidget *back_btn = NULL;
+
+GtkTextTag *tag1, *tag2;
+GdkColor color;
+
+GtkTreeStore *tree1, *tree2, *tree;
+GtkTreeModel *model1, *model2;
+static GtkTreeIter *parents[256];
+static gint indent;
+
+static struct menu *current; // current node for SINGLE view
+static struct menu *browsed; // browsed node for SPLIT view
+
+enum {
+ COL_OPTION, COL_NAME, COL_NO, COL_MOD, COL_YES, COL_VALUE,
+ COL_MENU, COL_COLOR, COL_EDIT, COL_PIXBUF,
+ COL_PIXVIS, COL_BTNVIS, COL_BTNACT, COL_BTNINC, COL_BTNRAD,
+ COL_NUMBER
+};
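+/* Column layout shared by both tree stores: the option text, the symbol name,
+ * the N/M/Y indicator columns and the value, followed by bookkeeping columns
+ * (menu pointer, colour, editability and the pixbuf/toggle renderer state). */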
+
+static void display_list(void);
+static void display_tree(struct menu *menu);
+static void display_tree_part(void);
+static void update_tree(struct menu *src, GtkTreeIter * dst);
+static void set_node(GtkTreeIter * node, struct menu *menu, gchar ** row);
+static gchar **fill_row(struct menu *menu);
+
+
+/* Helper/Debugging Functions */
+
+
+const char *dbg_print_stype(int val)
+{
+ static char buf[256];
+
+ bzero(buf, 256);
+
+ if (val == S_UNKNOWN)
+ strcpy(buf, "unknown");
+ if (val == S_BOOLEAN)
+ strcpy(buf, "boolean");
+ if (val == S_TRISTATE)
+ strcpy(buf, "tristate");
+ if (val == S_INT)
+ strcpy(buf, "int");
+ if (val == S_HEX)
+ strcpy(buf, "hex");
+ if (val == S_STRING)
+ strcpy(buf, "string");
+ if (val == S_OTHER)
+ strcpy(buf, "other");
+
+#ifdef DEBUG
+ printf("%s", buf);
+#endif
+
+ return buf;
+}
+
+const char *dbg_print_flags(int val)
+{
+ static char buf[256];
+
+ bzero(buf, 256);
+
+ if (val & SYMBOL_YES)
+ strcat(buf, "yes/");
+ if (val & SYMBOL_MOD)
+ strcat(buf, "mod/");
+ if (val & SYMBOL_NO)
+ strcat(buf, "no/");
+ if (val & SYMBOL_CONST)
+ strcat(buf, "const/");
+ if (val & SYMBOL_CHECK)
+ strcat(buf, "check/");
+ if (val & SYMBOL_CHOICE)
+ strcat(buf, "choice/");
+ if (val & SYMBOL_CHOICEVAL)
+ strcat(buf, "choiceval/");
+ if (val & SYMBOL_PRINTED)
+ strcat(buf, "printed/");
+ if (val & SYMBOL_VALID)
+ strcat(buf, "valid/");
+ if (val & SYMBOL_OPTIONAL)
+ strcat(buf, "optional/");
+ if (val & SYMBOL_WRITE)
+ strcat(buf, "write/");
+ if (val & SYMBOL_CHANGED)
+ strcat(buf, "changed/");
+ if (val & SYMBOL_NEW)
+ strcat(buf, "new/");
+ if (val & SYMBOL_AUTO)
+ strcat(buf, "auto/");
+
+ buf[strlen(buf) - 1] = '\0';
+#ifdef DEBUG
+ printf("%s", buf);
+#endif
+
+ return buf;
+}
+
+const char *dbg_print_ptype(int val)
+{
+ static char buf[256];
+
+ bzero(buf, 256);
+
+ if (val == P_UNKNOWN)
+ strcpy(buf, "unknown");
+ if (val == P_PROMPT)
+ strcpy(buf, "prompt");
+ if (val == P_COMMENT)
+ strcpy(buf, "comment");
+ if (val == P_MENU)
+ strcpy(buf, "menu");
+ if (val == P_DEFAULT)
+ strcpy(buf, "default");
+ if (val == P_CHOICE)
+ strcpy(buf, "choice");
+
+#ifdef DEBUG
+ printf("%s", buf);
+#endif
+
+ return buf;
+}
+
+
+void replace_button_icon(GladeXML * xml, GdkDrawable * window,
+ GtkStyle * style, gchar * btn_name, gchar ** xpm)
+{
+ GdkPixmap *pixmap;
+ GdkBitmap *mask;
+ GtkToolButton *button;
+ GtkWidget *image;
+
+ pixmap = gdk_pixmap_create_from_xpm_d(window, &mask,
+ &style->bg[GTK_STATE_NORMAL],
+ xpm);
+
+ button = GTK_TOOL_BUTTON(glade_xml_get_widget(xml, btn_name));
+ image = gtk_image_new_from_pixmap(pixmap, mask);
+ gtk_widget_show(image);
+ gtk_tool_button_set_icon_widget(button, image);
+}
+
+/* Main Window Initialization */
+void init_main_window(const gchar * glade_file)
+{
+ GladeXML *xml;
+ GtkWidget *widget;
+ GtkTextBuffer *txtbuf;
+ char title[256];
+ GtkStyle *style;
+
+ xml = glade_xml_new(glade_file, "window1", NULL);
+ if (!xml)
+ g_error(_("GUI loading failed !\n"));
+ glade_xml_signal_autoconnect(xml);
+
+ main_wnd = glade_xml_get_widget(xml, "window1");
+ hpaned = glade_xml_get_widget(xml, "hpaned1");
+ vpaned = glade_xml_get_widget(xml, "vpaned1");
+ tree1_w = glade_xml_get_widget(xml, "treeview1");
+ tree2_w = glade_xml_get_widget(xml, "treeview2");
+ text_w = glade_xml_get_widget(xml, "textview3");
+
+ back_btn = glade_xml_get_widget(xml, "button1");
+ gtk_widget_set_sensitive(back_btn, FALSE);
+
+ widget = glade_xml_get_widget(xml, "show_name1");
+ gtk_check_menu_item_set_active((GtkCheckMenuItem *) widget,
+ show_name);
+
+ widget = glade_xml_get_widget(xml, "show_range1");
+ gtk_check_menu_item_set_active((GtkCheckMenuItem *) widget,
+ show_range);
+
+ widget = glade_xml_get_widget(xml, "show_data1");
+ gtk_check_menu_item_set_active((GtkCheckMenuItem *) widget,
+ show_value);
+
+ style = gtk_widget_get_style(main_wnd);
+ widget = glade_xml_get_widget(xml, "toolbar1");
+
+#if 0 /* Use stock Gtk icons instead */
+ replace_button_icon(xml, main_wnd->window, style,
+ "button1", (gchar **) xpm_back);
+ replace_button_icon(xml, main_wnd->window, style,
+ "button2", (gchar **) xpm_load);
+ replace_button_icon(xml, main_wnd->window, style,
+ "button3", (gchar **) xpm_save);
+#endif
+ replace_button_icon(xml, main_wnd->window, style,
+ "button4", (gchar **) xpm_single_view);
+ replace_button_icon(xml, main_wnd->window, style,
+ "button5", (gchar **) xpm_split_view);
+ replace_button_icon(xml, main_wnd->window, style,
+ "button6", (gchar **) xpm_tree_view);
+
+#if 0
+ switch (view_mode) {
+ case SINGLE_VIEW:
+ widget = glade_xml_get_widget(xml, "button4");
+ g_signal_emit_by_name(widget, "clicked");
+ break;
+ case SPLIT_VIEW:
+ widget = glade_xml_get_widget(xml, "button5");
+ g_signal_emit_by_name(widget, "clicked");
+ break;
+ case FULL_VIEW:
+ widget = glade_xml_get_widget(xml, "button6");
+ g_signal_emit_by_name(widget, "clicked");
+ break;
+ }
+#endif
+ txtbuf = gtk_text_view_get_buffer(GTK_TEXT_VIEW(text_w));
+ tag1 = gtk_text_buffer_create_tag(txtbuf, "mytag1",
+ "foreground", "red",
+ "weight", PANGO_WEIGHT_BOLD,
+ NULL);
+ tag2 = gtk_text_buffer_create_tag(txtbuf, "mytag2",
+ /*"style", PANGO_STYLE_OBLIQUE, */
+ NULL);
+
+ sprintf(title, _("LWK Kernel v%s Configuration"),
+ getenv("KERNELVERSION"));
+ gtk_window_set_title(GTK_WINDOW(main_wnd), title);
+
+ gtk_widget_show(main_wnd);
+}
+
+void init_tree_model(void)
+{
+ gint i;
+
+ tree = tree2 = gtk_tree_store_new(COL_NUMBER,
+ G_TYPE_STRING, G_TYPE_STRING,
+ G_TYPE_STRING, G_TYPE_STRING,
+ G_TYPE_STRING, G_TYPE_STRING,
+ G_TYPE_POINTER, GDK_TYPE_COLOR,
+ G_TYPE_BOOLEAN, GDK_TYPE_PIXBUF,
+ G_TYPE_BOOLEAN, G_TYPE_BOOLEAN,
+ G_TYPE_BOOLEAN, G_TYPE_BOOLEAN,
+ G_TYPE_BOOLEAN);
+ model2 = GTK_TREE_MODEL(tree2);
+
+ for (parents[0] = NULL, i = 1; i < 256; i++)
+ parents[i] = (GtkTreeIter *) g_malloc(sizeof(GtkTreeIter));
+
+ tree1 = gtk_tree_store_new(COL_NUMBER,
+ G_TYPE_STRING, G_TYPE_STRING,
+ G_TYPE_STRING, G_TYPE_STRING,
+ G_TYPE_STRING, G_TYPE_STRING,
+ G_TYPE_POINTER, GDK_TYPE_COLOR,
+ G_TYPE_BOOLEAN, GDK_TYPE_PIXBUF,
+ G_TYPE_BOOLEAN, G_TYPE_BOOLEAN,
+ G_TYPE_BOOLEAN, G_TYPE_BOOLEAN,
+ G_TYPE_BOOLEAN);
+ model1 = GTK_TREE_MODEL(tree1);
+}
+
+void init_left_tree(void)
+{
+ GtkTreeView *view = GTK_TREE_VIEW(tree1_w);
+ GtkCellRenderer *renderer;
+ GtkTreeSelection *sel;
+ GtkTreeViewColumn *column;
+
+ gtk_tree_view_set_model(view, model1);
+ gtk_tree_view_set_headers_visible(view, TRUE);
+ gtk_tree_view_set_rules_hint(view, FALSE);
+
+ column = gtk_tree_view_column_new();
+ gtk_tree_view_append_column(view, column);
+ gtk_tree_view_column_set_title(column, _("Options"));
+
+ renderer = gtk_cell_renderer_toggle_new();
+ gtk_tree_view_column_pack_start(GTK_TREE_VIEW_COLUMN(column),
+ renderer, FALSE);
+ gtk_tree_view_column_set_attributes(GTK_TREE_VIEW_COLUMN(column),
+ renderer,
+ "active", COL_BTNACT,
+ "inconsistent", COL_BTNINC,
+ "visible", COL_BTNVIS,
+ "radio", COL_BTNRAD, NULL);
+ renderer = gtk_cell_renderer_text_new();
+ gtk_tree_view_column_pack_start(GTK_TREE_VIEW_COLUMN(column),
+ renderer, FALSE);
+ gtk_tree_view_column_set_attributes(GTK_TREE_VIEW_COLUMN(column),
+ renderer,
+ "text", COL_OPTION,
+ "foreground-gdk",
+ COL_COLOR, NULL);
+
+ sel = gtk_tree_view_get_selection(view);
+ gtk_tree_selection_set_mode(sel, GTK_SELECTION_SINGLE);
+ gtk_widget_realize(tree1_w);
+}
+
+static void renderer_edited(GtkCellRendererText * cell,
+ const gchar * path_string,
+ const gchar * new_text, gpointer user_data);
+static void renderer_toggled(GtkCellRendererToggle * cellrenderertoggle,
+ gchar * arg1, gpointer user_data);
+
+void init_right_tree(void)
+{
+ GtkTreeView *view = GTK_TREE_VIEW(tree2_w);
+ GtkCellRenderer *renderer;
+ GtkTreeSelection *sel;
+ GtkTreeViewColumn *column;
+ gint i;
+
+ gtk_tree_view_set_model(view, model2);
+ gtk_tree_view_set_headers_visible(view, TRUE);
+ gtk_tree_view_set_rules_hint(view, FALSE);
+
+ column = gtk_tree_view_column_new();
+ gtk_tree_view_append_column(view, column);
+ gtk_tree_view_column_set_title(column, _("Options"));
+
+ renderer = gtk_cell_renderer_pixbuf_new();
+ gtk_tree_view_column_pack_start(GTK_TREE_VIEW_COLUMN(column),
+ renderer, FALSE);
+ gtk_tree_view_column_set_attributes(GTK_TREE_VIEW_COLUMN(column),
+ renderer,
+ "pixbuf", COL_PIXBUF,
+ "visible", COL_PIXVIS, NULL);
+ renderer = gtk_cell_renderer_toggle_new();
+ gtk_tree_view_column_pack_start(GTK_TREE_VIEW_COLUMN(column),
+ renderer, FALSE);
+ gtk_tree_view_column_set_attributes(GTK_TREE_VIEW_COLUMN(column),
+ renderer,
+ "active", COL_BTNACT,
+ "inconsistent", COL_BTNINC,
+ "visible", COL_BTNVIS,
+ "radio", COL_BTNRAD, NULL);
+ /*g_signal_connect(G_OBJECT(renderer), "toggled",
+ G_CALLBACK(renderer_toggled), NULL); */
+ renderer = gtk_cell_renderer_text_new();
+ gtk_tree_view_column_pack_start(GTK_TREE_VIEW_COLUMN(column),
+ renderer, FALSE);
+ gtk_tree_view_column_set_attributes(GTK_TREE_VIEW_COLUMN(column),
+ renderer,
+ "text", COL_OPTION,
+ "foreground-gdk",
+ COL_COLOR, NULL);
+
+ renderer = gtk_cell_renderer_text_new();
+ gtk_tree_view_insert_column_with_attributes(view, -1,
+ _("Name"), renderer,
+ "text", COL_NAME,
+ "foreground-gdk",
+ COL_COLOR, NULL);
+ renderer = gtk_cell_renderer_text_new();
+ gtk_tree_view_insert_column_with_attributes(view, -1,
+ "N", renderer,
+ "text", COL_NO,
+ "foreground-gdk",
+ COL_COLOR, NULL);
+ renderer = gtk_cell_renderer_text_new();
+ gtk_tree_view_insert_column_with_attributes(view, -1,
+ "M", renderer,
+ "text", COL_MOD,
+ "foreground-gdk",
+ COL_COLOR, NULL);
+ renderer = gtk_cell_renderer_text_new();
+ gtk_tree_view_insert_column_with_attributes(view, -1,
+ "Y", renderer,
+ "text", COL_YES,
+ "foreground-gdk",
+ COL_COLOR, NULL);
+ renderer = gtk_cell_renderer_text_new();
+ gtk_tree_view_insert_column_with_attributes(view, -1,
+ _("Value"), renderer,
+ "text", COL_VALUE,
+ "editable",
+ COL_EDIT,
+ "foreground-gdk",
+ COL_COLOR, NULL);
+ g_signal_connect(G_OBJECT(renderer), "edited",
+ G_CALLBACK(renderer_edited), NULL);
+
+ column = gtk_tree_view_get_column(view, COL_NAME);
+ gtk_tree_view_column_set_visible(column, show_name);
+ column = gtk_tree_view_get_column(view, COL_NO);
+ gtk_tree_view_column_set_visible(column, show_range);
+ column = gtk_tree_view_get_column(view, COL_MOD);
+ gtk_tree_view_column_set_visible(column, show_range);
+ column = gtk_tree_view_get_column(view, COL_YES);
+ gtk_tree_view_column_set_visible(column, show_range);
+ column = gtk_tree_view_get_column(view, COL_VALUE);
+ gtk_tree_view_column_set_visible(column, show_value);
+
+ if (resizeable) {
+ for (i = 0; i < COL_VALUE; i++) {
+ column = gtk_tree_view_get_column(view, i);
+ gtk_tree_view_column_set_resizable(column, TRUE);
+ }
+ }
+
+ sel = gtk_tree_view_get_selection(view);
+ gtk_tree_selection_set_mode(sel, GTK_SELECTION_SINGLE);
+}
+
+
+/* Utility Functions */
+
+
+static void text_insert_help(struct menu *menu)
+{
+ GtkTextBuffer *buffer;
+ GtkTextIter start, end;
+ const char *prompt = menu_get_prompt(menu);
+ gchar *name;
+ const char *help = _(nohelp_text);
+
+ if (!menu->sym)
+ help = "";
+ else if (menu->sym->help)
+ help = _(menu->sym->help);
+
+ if (menu->sym && menu->sym->name)
+ name = g_strdup_printf("%s", _(menu->sym->name));
+ else
+ name = g_strdup("");
+
+ buffer = gtk_text_view_get_buffer(GTK_TEXT_VIEW(text_w));
+ gtk_text_buffer_get_bounds(buffer, &start, &end);
+ gtk_text_buffer_delete(buffer, &start, &end);
+ gtk_text_view_set_left_margin(GTK_TEXT_VIEW(text_w), 15);
+
+ gtk_text_buffer_get_end_iter(buffer, &end);
+ gtk_text_buffer_insert_with_tags(buffer, &end, prompt, -1, tag1,
+ NULL);
+ gtk_text_buffer_insert_at_cursor(buffer, " ", 1);
+ gtk_text_buffer_get_end_iter(buffer, &end);
+ gtk_text_buffer_insert_with_tags(buffer, &end, name, -1, tag1,
+ NULL);
+ gtk_text_buffer_insert_at_cursor(buffer, "\n\n", 2);
+ gtk_text_buffer_get_end_iter(buffer, &end);
+ gtk_text_buffer_insert_with_tags(buffer, &end, help, -1, tag2,
+ NULL);
+}
+
+
+static void text_insert_msg(const char *title, const char *message)
+{
+ GtkTextBuffer *buffer;
+ GtkTextIter start, end;
+ const char *msg = message;
+
+ buffer = gtk_text_view_get_buffer(GTK_TEXT_VIEW(text_w));
+ gtk_text_buffer_get_bounds(buffer, &start, &end);
+ gtk_text_buffer_delete(buffer, &start, &end);
+ gtk_text_view_set_left_margin(GTK_TEXT_VIEW(text_w), 15);
+
+ gtk_text_buffer_get_end_iter(buffer, &end);
+ gtk_text_buffer_insert_with_tags(buffer, &end, title, -1, tag1,
+ NULL);
+ gtk_text_buffer_insert_at_cursor(buffer, "\n\n", 2);
+ gtk_text_buffer_get_end_iter(buffer, &end);
+ gtk_text_buffer_insert_with_tags(buffer, &end, msg, -1, tag2,
+ NULL);
+}
+
+
+/* Main Windows Callbacks */
+
+void on_save1_activate(GtkMenuItem * menuitem, gpointer user_data);
+gboolean on_window1_delete_event(GtkWidget * widget, GdkEvent * event,
+ gpointer user_data)
+{
+ GtkWidget *dialog, *label;
+ gint result;
+
+ if (config_changed == FALSE)
+ return FALSE;
+
+ dialog = gtk_dialog_new_with_buttons(_("Warning!"),
+ GTK_WINDOW(main_wnd),
+ (GtkDialogFlags)
+ (GTK_DIALOG_MODAL |
+ GTK_DIALOG_DESTROY_WITH_PARENT),
+ GTK_STOCK_OK,
+ GTK_RESPONSE_YES,
+ GTK_STOCK_NO,
+ GTK_RESPONSE_NO,
+ GTK_STOCK_CANCEL,
+ GTK_RESPONSE_CANCEL, NULL);
+ gtk_dialog_set_default_response(GTK_DIALOG(dialog),
+ GTK_RESPONSE_CANCEL);
+
+ label = gtk_label_new(_("\nSave configuration?\n"));
+ gtk_container_add(GTK_CONTAINER(GTK_DIALOG(dialog)->vbox), label);
+ gtk_widget_show(label);
+
+ result = gtk_dialog_run(GTK_DIALOG(dialog));
+ switch (result) {
+ case GTK_RESPONSE_YES:
+ on_save1_activate(NULL, NULL);
+ return FALSE;
+ case GTK_RESPONSE_NO:
+ return FALSE;
+ case GTK_RESPONSE_CANCEL:
+ case GTK_RESPONSE_DELETE_EVENT:
+ default:
+ gtk_widget_destroy(dialog);
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+
+void on_window1_destroy(GtkObject * object, gpointer user_data)
+{
+ gtk_main_quit();
+}
+
+
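+/* Keep the help pane at about one third of the window height whenever
+ * the main window height changes. */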
+void
+on_window1_size_request(GtkWidget * widget,
+ GtkRequisition * requisition, gpointer user_data)
+{
+ static gint old_h;
+ gint w, h;
+
+ if (widget->window == NULL)
+ gtk_window_get_default_size(GTK_WINDOW(main_wnd), &w, &h);
+ else
+ gdk_window_get_size(widget->window, &w, &h);
+
+ if (h == old_h)
+ return;
+ old_h = h;
+
+ gtk_paned_set_position(GTK_PANED(vpaned), 2 * h / 3);
+}
+
+
+/* Menu & Toolbar Callbacks */
+
+
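+/* File-selection callback: read the chosen configuration file and
+ * redisplay the whole tree. */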
+static void
+load_filename(GtkFileSelection * file_selector, gpointer user_data)
+{
+ const gchar *fn;
+
+ fn = gtk_file_selection_get_filename(GTK_FILE_SELECTION
+ (user_data));
+
+ if (conf_read(fn))
+ text_insert_msg(_("Error"), _("Unable to load configuration !"));
+ else
+ display_tree(&rootmenu);
+}
+
+void on_load1_activate(GtkMenuItem * menuitem, gpointer user_data)
+{
+ GtkWidget *fs;
+
+ fs = gtk_file_selection_new(_("Load file..."));
+ g_signal_connect(GTK_OBJECT(GTK_FILE_SELECTION(fs)->ok_button),
+ "clicked",
+ G_CALLBACK(load_filename), (gpointer) fs);
+ g_signal_connect_swapped(GTK_OBJECT
+ (GTK_FILE_SELECTION(fs)->ok_button),
+ "clicked", G_CALLBACK(gtk_widget_destroy),
+ (gpointer) fs);
+ g_signal_connect_swapped(GTK_OBJECT
+ (GTK_FILE_SELECTION(fs)->cancel_button),
+ "clicked", G_CALLBACK(gtk_widget_destroy),
+ (gpointer) fs);
+ gtk_widget_show(fs);
+}
+
+
+void on_save1_activate(GtkMenuItem * menuitem, gpointer user_data)
+{
+ if (conf_write(NULL))
+ text_insert_msg(_("Error"), _("Unable to save configuration!"));
+
+ config_changed = FALSE;
+}
+
+
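+/* File-selection callback: write the current configuration to the
+ * chosen file, then close the dialog. */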
+static void
+store_filename(GtkFileSelection * file_selector, gpointer user_data)
+{
+ const gchar *fn;
+
+ fn = gtk_file_selection_get_filename(GTK_FILE_SELECTION
+ (user_data));
+
+ if (conf_write(fn))
+ text_insert_msg(_("Error"), _("Unable to save configuration!"));
+
+ gtk_widget_destroy(GTK_WIDGET(user_data));
+}
+
+void on_save_as1_activate(GtkMenuItem * menuitem, gpointer user_data)
+{
+ GtkWidget *fs;
+
+ fs = gtk_file_selection_new(_("Save file as..."));
+ g_signal_connect(GTK_OBJECT(GTK_FILE_SELECTION(fs)->ok_button),
+ "clicked",
+ G_CALLBACK(store_filename), (gpointer) fs);
+ g_signal_connect_swapped(GTK_OBJECT
+ (GTK_FILE_SELECTION(fs)->ok_button),
+ "clicked", G_CALLBACK(gtk_widget_destroy),
+ (gpointer) fs);
+ g_signal_connect_swapped(GTK_OBJECT
+ (GTK_FILE_SELECTION(fs)->cancel_button),
+ "clicked", G_CALLBACK(gtk_widget_destroy),
+ (gpointer) fs);
+ gtk_widget_show(fs);
+}
+
+
+void on_quit1_activate(GtkMenuItem * menuitem, gpointer user_data)
+{
+ if (!on_window1_delete_event(NULL, NULL, NULL))
+ gtk_widget_destroy(GTK_WIDGET(main_wnd));
+}
+
+
+void on_show_name1_activate(GtkMenuItem * menuitem, gpointer user_data)
+{
+ GtkTreeViewColumn *col;
+
+ show_name = GTK_CHECK_MENU_ITEM(menuitem)->active;
+ col = gtk_tree_view_get_column(GTK_TREE_VIEW(tree2_w), COL_NAME);
+ if (col)
+ gtk_tree_view_column_set_visible(col, show_name);
+}
+
+
+void on_show_range1_activate(GtkMenuItem * menuitem, gpointer user_data)
+{
+ GtkTreeViewColumn *col;
+
+ show_range = GTK_CHECK_MENU_ITEM(menuitem)->active;
+ col = gtk_tree_view_get_column(GTK_TREE_VIEW(tree2_w), COL_NO);
+ if (col)
+ gtk_tree_view_column_set_visible(col, show_range);
+ col = gtk_tree_view_get_column(GTK_TREE_VIEW(tree2_w), COL_MOD);
+ if (col)
+ gtk_tree_view_column_set_visible(col, show_range);
+ col = gtk_tree_view_get_column(GTK_TREE_VIEW(tree2_w), COL_YES);
+ if (col)
+ gtk_tree_view_column_set_visible(col, show_range);
+
+}
+
+
+void on_show_data1_activate(GtkMenuItem * menuitem, gpointer user_data)
+{
+ GtkTreeViewColumn *col;
+
+ show_value = GTK_CHECK_MENU_ITEM(menuitem)->active;
+ col = gtk_tree_view_get_column(GTK_TREE_VIEW(tree2_w), COL_VALUE);
+ if (col)
+ gtk_tree_view_column_set_visible(col, show_value);
+}
+
+
+void
+on_show_all_options1_activate(GtkMenuItem * menuitem, gpointer user_data)
+{
+ show_all = GTK_CHECK_MENU_ITEM(menuitem)->active;
+
+ gtk_tree_store_clear(tree2);
+ display_tree(&rootmenu); // instead of update_tree to speed-up
+}
+
+
+void
+on_show_debug_info1_activate(GtkMenuItem * menuitem, gpointer user_data)
+{
+ show_debug = GTK_CHECK_MENU_ITEM(menuitem)->active;
+ update_tree(&rootmenu, NULL);
+}
+
+
+void on_introduction1_activate(GtkMenuItem * menuitem, gpointer user_data)
+{
+ GtkWidget *dialog;
+ const gchar *intro_text = _(
+ "Welcome to gkc, the GTK+ graphical kernel configuration tool\n"
+ "for LWK.\n"
+ "For each option, a blank box indicates the feature is disabled, a\n"
+ "check indicates it is enabled, and a dot indicates that it is to\n"
+ "be compiled as a module. Clicking on the box will cycle through the three states.\n"
+ "\n"
+ "If you do not see an option (e.g., a device driver) that you\n"
+ "believe should be present, try turning on Show All Options\n"
+ "under the Options menu.\n"
+ "Although there is no cross reference yet to help you figure out\n"
+ "what other options must be enabled to support the option you\n"
+ "are interested in, you can still view the help of a grayed-out\n"
+ "option.\n"
+ "\n"
+ "Toggling Show Debug Info under the Options menu will show \n"
+ "the dependencies, which you can then match by examining other options.");
+
+ dialog = gtk_message_dialog_new(GTK_WINDOW(main_wnd),
+ GTK_DIALOG_DESTROY_WITH_PARENT,
+ GTK_MESSAGE_INFO,
+ GTK_BUTTONS_CLOSE, intro_text);
+ g_signal_connect_swapped(GTK_OBJECT(dialog), "response",
+ G_CALLBACK(gtk_widget_destroy),
+ GTK_OBJECT(dialog));
+ gtk_widget_show_all(dialog);
+}
+
+
+void on_about1_activate(GtkMenuItem * menuitem, gpointer user_data)
+{
+ GtkWidget *dialog;
+ const gchar *about_text =
+ _("gkc is copyright (c) 2002 Romain Lievin <roms@lpg.ticalc.org>.\n"
+ "Based on the source code from Roman Zippel.\n");
+
+ dialog = gtk_message_dialog_new(GTK_WINDOW(main_wnd),
+ GTK_DIALOG_DESTROY_WITH_PARENT,
+ GTK_MESSAGE_INFO,
+ GTK_BUTTONS_CLOSE, about_text);
+ g_signal_connect_swapped(GTK_OBJECT(dialog), "response",
+ G_CALLBACK(gtk_widget_destroy),
+ GTK_OBJECT(dialog));
+ gtk_widget_show_all(dialog);
+}
+
+
+void on_license1_activate(GtkMenuItem * menuitem, gpointer user_data)
+{
+ GtkWidget *dialog;
+ const gchar *license_text =
+ _("gkc is released under the terms of the GNU GPL v2.\n"
+ "For more information, please see the source code or\n"
+ "visit http://www.fsf.org/licenses/licenses.html\n");
+
+ dialog = gtk_message_dialog_new(GTK_WINDOW(main_wnd),
+ GTK_DIALOG_DESTROY_WITH_PARENT,
+ GTK_MESSAGE_INFO,
+ GTK_BUTTONS_CLOSE, license_text);
+ g_signal_connect_swapped(GTK_OBJECT(dialog), "response",
+ G_CALLBACK(gtk_widget_destroy),
+ GTK_OBJECT(dialog));
+ gtk_widget_show_all(dialog);
+}
+
+
+void on_back_clicked(GtkButton * button, gpointer user_data)
+{
+ enum prop_type ptype;
+
+ current = current->parent;
+ ptype = current->prompt ? current->prompt->type : P_UNKNOWN;
+ if (ptype != P_MENU)
+ current = current->parent;
+ display_tree_part();
+
+ if (current == &rootmenu)
+ gtk_widget_set_sensitive(back_btn, FALSE);
+}
+
+
+void on_load_clicked(GtkButton * button, gpointer user_data)
+{
+ on_load1_activate(NULL, user_data);
+}
+
+
+void on_save_clicked(GtkButton * button, gpointer user_data)
+{
+ on_save1_activate(NULL, user_data);
+}
+
+
+void on_single_clicked(GtkButton * button, gpointer user_data)
+{
+ view_mode = SINGLE_VIEW;
+ gtk_paned_set_position(GTK_PANED(hpaned), 0);
+ gtk_widget_hide(tree1_w);
+ current = &rootmenu;
+ display_tree_part();
+}
+
+
+void on_split_clicked(GtkButton * button, gpointer user_data)
+{
+ gint w, h;
+ view_mode = SPLIT_VIEW;
+ gtk_widget_show(tree1_w);
+ gtk_window_get_default_size(GTK_WINDOW(main_wnd), &w, &h);
+ gtk_paned_set_position(GTK_PANED(hpaned), w / 2);
+ if (tree2)
+ gtk_tree_store_clear(tree2);
+ display_list();
+
+ /* Disable back btn, like in full mode. */
+ gtk_widget_set_sensitive(back_btn, FALSE);
+}
+
+
+void on_full_clicked(GtkButton * button, gpointer user_data)
+{
+ view_mode = FULL_VIEW;
+ gtk_paned_set_position(GTK_PANED(hpaned), 0);
+ gtk_widget_hide(tree1_w);
+ if (tree2)
+ gtk_tree_store_clear(tree2);
+ display_tree(&rootmenu);
+ gtk_widget_set_sensitive(back_btn, FALSE);
+}
+
+
+void on_collapse_clicked(GtkButton * button, gpointer user_data)
+{
+ gtk_tree_view_collapse_all(GTK_TREE_VIEW(tree2_w));
+}
+
+
+void on_expand_clicked(GtkButton * button, gpointer user_data)
+{
+ gtk_tree_view_expand_all(GTK_TREE_VIEW(tree2_w));
+}
+
+
+/* Tree View Callbacks */
+
+/* Change hex/int/string value in the cell */
+static void renderer_edited(GtkCellRendererText * cell,
+ const gchar * path_string,
+ const gchar * new_text, gpointer user_data)
+{
+ GtkTreePath *path = gtk_tree_path_new_from_string(path_string);
+ GtkTreeIter iter;
+ const char *old_def, *new_def;
+ struct menu *menu;
+ struct symbol *sym;
+
+ if (!gtk_tree_model_get_iter(model2, &iter, path))
+ return;
+
+ gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1);
+ sym = menu->sym;
+
+ gtk_tree_model_get(model2, &iter, COL_VALUE, &old_def, -1);
+ new_def = new_text;
+
+ sym_set_string_value(sym, new_def);
+
+ config_changed = TRUE;
+ update_tree(&rootmenu, NULL);
+
+ gtk_tree_path_free(path);
+}
+
+/* Change the value of a symbol and update the tree */
+static void change_sym_value(struct menu *menu, gint col)
+{
+ struct symbol *sym = menu->sym;
+ tristate oldval, newval;
+
+ if (!sym)
+ return;
+
+ if (col == COL_NO)
+ newval = no;
+ else if (col == COL_MOD)
+ newval = mod;
+ else if (col == COL_YES)
+ newval = yes;
+ else
+ return;
+
+ switch (sym_get_type(sym)) {
+ case S_BOOLEAN:
+ case S_TRISTATE:
+ oldval = sym_get_tristate_value(sym);
+ if (!sym_tristate_within_range(sym, newval))
+ newval = yes;
+ sym_set_tristate_value(sym, newval);
+ config_changed = TRUE;
+ if (view_mode == FULL_VIEW)
+ update_tree(&rootmenu, NULL);
+ else if (view_mode == SPLIT_VIEW) {
+ update_tree(browsed, NULL);
+ display_list();
+ }
+ else if (view_mode == SINGLE_VIEW)
+ display_tree_part(); //fixme: preserve expanded/collapsed state
+ break;
+ case S_INT:
+ case S_HEX:
+ case S_STRING:
+ default:
+ break;
+ }
+}
+
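+/* Toggle a boolean/tristate symbol and refresh whichever view is
+ * currently displayed. */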
+static void toggle_sym_value(struct menu *menu)
+{
+ if (!menu->sym)
+ return;
+
+ sym_toggle_tristate_value(menu->sym);
+ if (view_mode == FULL_VIEW)
+ update_tree(&rootmenu, NULL);
+ else if (view_mode == SPLIT_VIEW) {
+ update_tree(browsed, NULL);
+ display_list();
+ }
+ else if (view_mode == SINGLE_VIEW)
+ display_tree_part(); //fixme: preserve expanded/collapsed state
+}
+
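+/* Toggle-renderer callback: only acts when the toggled row is the one
+ * currently selected (note: the signal connection is commented out in
+ * init_right_tree). */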
+static void renderer_toggled(GtkCellRendererToggle * cell,
+ gchar * path_string, gpointer user_data)
+{
+ GtkTreePath *path, *sel_path = NULL;
+ GtkTreeIter iter, sel_iter;
+ GtkTreeSelection *sel;
+ struct menu *menu;
+
+ path = gtk_tree_path_new_from_string(path_string);
+ if (!gtk_tree_model_get_iter(model2, &iter, path))
+ goto out1; /* free the path even on the early exit */
+
+ sel = gtk_tree_view_get_selection(GTK_TREE_VIEW(tree2_w));
+ if (gtk_tree_selection_get_selected(sel, NULL, &sel_iter))
+ sel_path = gtk_tree_model_get_path(model2, &sel_iter);
+ if (!sel_path)
+ goto out1;
+ if (gtk_tree_path_compare(path, sel_path))
+ goto out2;
+
+ gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1);
+ toggle_sym_value(menu);
+
+ out2:
+ gtk_tree_path_free(sel_path);
+ out1:
+ gtk_tree_path_free(path);
+}
+
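+/* Map a GtkTreeViewColumn of the right pane back to its column index,
+ * or -1 if it is not found. */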
+static gint column2index(GtkTreeViewColumn * column)
+{
+ gint i;
+
+ for (i = 0; i < COL_NUMBER; i++) {
+ GtkTreeViewColumn *col;
+
+ col = gtk_tree_view_get_column(GTK_TREE_VIEW(tree2_w), i);
+ if (col == column)
+ return i;
+ }
+
+ return -1;
+}
+
+
+/* User click: update the choice (full view) or descend into the menu (single view) */
+gboolean
+on_treeview2_button_press_event(GtkWidget * widget,
+ GdkEventButton * event, gpointer user_data)
+{
+ GtkTreeView *view = GTK_TREE_VIEW(widget);
+ GtkTreePath *path;
+ GtkTreeViewColumn *column;
+ GtkTreeIter iter;
+ struct menu *menu;
+ gint col;
+
+#if GTK_CHECK_VERSION(2,1,4) // work around a tree-view bug in GTK+ releases older than 2.1.4
+ gint tx = (gint) event->x;
+ gint ty = (gint) event->y;
+ gint cx, cy;
+
+ gtk_tree_view_get_path_at_pos(view, tx, ty, &path, &column, &cx,
+ &cy);
+#else
+ gtk_tree_view_get_cursor(view, &path, &column);
+#endif
+ if (path == NULL)
+ return FALSE;
+
+ if (!gtk_tree_model_get_iter(model2, &iter, path))
+ return FALSE;
+ gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1);
+
+ col = column2index(column);
+ if (event->type == GDK_2BUTTON_PRESS) {
+ enum prop_type ptype;
+ ptype = menu->prompt ? menu->prompt->type : P_UNKNOWN;
+
+ if (ptype == P_MENU && view_mode != FULL_VIEW && col == COL_OPTION) {
+ // descend into the menu
+ current = menu;
+ display_tree_part();
+ gtk_widget_set_sensitive(back_btn, TRUE);
+ } else if ((col == COL_OPTION)) {
+ toggle_sym_value(menu);
+ gtk_tree_view_expand_row(view, path, TRUE);
+ }
+ } else {
+ if (col == COL_VALUE) {
+ toggle_sym_value(menu);
+ gtk_tree_view_expand_row(view, path, TRUE);
+ } else if (col == COL_NO || col == COL_MOD
+ || col == COL_YES) {
+ change_sym_value(menu, col);
+ gtk_tree_view_expand_row(view, path, TRUE);
+ }
+ }
+
+ return FALSE;
+}
+
+/* Key pressed: update choice */
+gboolean
+on_treeview2_key_press_event(GtkWidget * widget,
+ GdkEventKey * event, gpointer user_data)
+{
+ GtkTreeView *view = GTK_TREE_VIEW(widget);
+ GtkTreePath *path;
+ GtkTreeViewColumn *column;
+ GtkTreeIter iter;
+ struct menu *menu;
+ gint col;
+
+ gtk_tree_view_get_cursor(view, &path, &column);
+ if (path == NULL)
+ return FALSE;
+
+ if (event->keyval == GDK_space) {
+ if (gtk_tree_view_row_expanded(view, path))
+ gtk_tree_view_collapse_row(view, path);
+ else
+ gtk_tree_view_expand_row(view, path, FALSE);
+ return TRUE;
+ }
+ if (event->keyval == GDK_KP_Enter) {
+ }
+ if (widget == tree1_w)
+ return FALSE;
+
+ gtk_tree_model_get_iter(model2, &iter, path);
+ gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1);
+
+ if (!strcasecmp(event->string, "n"))
+ col = COL_NO;
+ else if (!strcasecmp(event->string, "m"))
+ col = COL_MOD;
+ else if (!strcasecmp(event->string, "y"))
+ col = COL_YES;
+ else
+ col = -1;
+ change_sym_value(menu, col);
+
+ return FALSE;
+}
+
+
+/* Row selection changed: update help */
+void
+on_treeview2_cursor_changed(GtkTreeView * treeview, gpointer user_data)
+{
+ GtkTreeSelection *selection;
+ GtkTreeIter iter;
+ struct menu *menu;
+
+ selection = gtk_tree_view_get_selection(treeview);
+ if (gtk_tree_selection_get_selected(selection, &model2, &iter)) {
+ gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1);
+ text_insert_help(menu);
+ }
+}
+
+
+/* User click: display sub-tree in the right frame. */
+gboolean
+on_treeview1_button_press_event(GtkWidget * widget,
+ GdkEventButton * event, gpointer user_data)
+{
+ GtkTreeView *view = GTK_TREE_VIEW(widget);
+ GtkTreePath *path;
+ GtkTreeViewColumn *column;
+ GtkTreeIter iter;
+ struct menu *menu;
+
+ gint tx = (gint) event->x;
+ gint ty = (gint) event->y;
+ gint cx, cy;
+
+ gtk_tree_view_get_path_at_pos(view, tx, ty, &path, &column, &cx,
+ &cy);
+ if (path == NULL)
+ return FALSE;
+
+ gtk_tree_model_get_iter(model1, &iter, path);
+ gtk_tree_model_get(model1, &iter, COL_MENU, &menu, -1);
+
+ if (event->type == GDK_2BUTTON_PRESS) {
+ toggle_sym_value(menu);
+ current = menu;
+ display_tree_part();
+ } else {
+ browsed = menu;
+ display_tree_part();
+ }
+
+ gtk_widget_realize(tree2_w);
+ gtk_tree_view_set_cursor(view, path, NULL, FALSE);
+ gtk_widget_grab_focus(tree2_w);
+
+ return FALSE;
+}
+
+
+/* Fill a row of strings */
+static gchar **fill_row(struct menu *menu)
+{
+ static gchar *row[COL_NUMBER];
+ struct symbol *sym = menu->sym;
+ const char *def;
+ int stype;
+ tristate val;
+ enum prop_type ptype;
+ int i;
+
+ for (i = COL_OPTION; i <= COL_COLOR; i++)
+ g_free(row[i]);
+ bzero(row, sizeof(row));
+
+ row[COL_OPTION] =
+ g_strdup_printf("%s %s", menu_get_prompt(menu),
+ sym ? (sym->
+ flags & SYMBOL_NEW ? "(NEW)" : "") :
+ "");
+
+ if (show_all && !menu_is_visible(menu))
+ row[COL_COLOR] = g_strdup("DarkGray");
+ else
+ row[COL_COLOR] = g_strdup("Black");
+
+ ptype = menu->prompt ? menu->prompt->type : P_UNKNOWN;
+ switch (ptype) {
+ case P_MENU:
+ row[COL_PIXBUF] = (gchar *) xpm_menu;
+ if (view_mode == SINGLE_VIEW)
+ row[COL_PIXVIS] = GINT_TO_POINTER(TRUE);
+ row[COL_BTNVIS] = GINT_TO_POINTER(FALSE);
+ break;
+ case P_COMMENT:
+ row[COL_PIXBUF] = (gchar *) xpm_void;
+ row[COL_PIXVIS] = GINT_TO_POINTER(FALSE);
+ row[COL_BTNVIS] = GINT_TO_POINTER(FALSE);
+ break;
+ default:
+ row[COL_PIXBUF] = (gchar *) xpm_void;
+ row[COL_PIXVIS] = GINT_TO_POINTER(FALSE);
+ row[COL_BTNVIS] = GINT_TO_POINTER(TRUE);
+ break;
+ }
+
+ if (!sym)
+ return row;
+ row[COL_NAME] = g_strdup(sym->name);
+
+ sym_calc_value(sym);
+ sym->flags &= ~SYMBOL_CHANGED;
+
+ if (sym_is_choice(sym)) { // walk the children to determine the choice's current value
+ struct menu *child;
+ struct symbol *def_sym = sym_get_choice_value(sym);
+ struct menu *def_menu = NULL;
+
+ row[COL_BTNVIS] = GINT_TO_POINTER(FALSE);
+
+ for (child = menu->list; child; child = child->next) {
+ if (menu_is_visible(child)
+ && child->sym == def_sym)
+ def_menu = child;
+ }
+
+ if (def_menu)
+ row[COL_VALUE] =
+ g_strdup(menu_get_prompt(def_menu));
+ }
+ if (sym->flags & SYMBOL_CHOICEVAL)
+ row[COL_BTNRAD] = GINT_TO_POINTER(TRUE);
+
+ stype = sym_get_type(sym);
+ switch (stype) {
+ case S_BOOLEAN:
+ if (GPOINTER_TO_INT(row[COL_PIXVIS]) == FALSE)
+ row[COL_BTNVIS] = GINT_TO_POINTER(TRUE);
+ if (sym_is_choice(sym))
+ break;
+ case S_TRISTATE:
+ val = sym_get_tristate_value(sym);
+ switch (val) {
+ case no:
+ row[COL_NO] = g_strdup("N");
+ row[COL_VALUE] = g_strdup("N");
+ row[COL_BTNACT] = GINT_TO_POINTER(FALSE);
+ row[COL_BTNINC] = GINT_TO_POINTER(FALSE);
+ break;
+ case mod:
+ row[COL_MOD] = g_strdup("M");
+ row[COL_VALUE] = g_strdup("M");
+ row[COL_BTNINC] = GINT_TO_POINTER(TRUE);
+ break;
+ case yes:
+ row[COL_YES] = g_strdup("Y");
+ row[COL_VALUE] = g_strdup("Y");
+ row[COL_BTNACT] = GINT_TO_POINTER(TRUE);
+ row[COL_BTNINC] = GINT_TO_POINTER(FALSE);
+ break;
+ }
+
+ if (val != no && sym_tristate_within_range(sym, no))
+ row[COL_NO] = g_strdup("_");
+ if (val != mod && sym_tristate_within_range(sym, mod))
+ row[COL_MOD] = g_strdup("_");
+ if (val != yes && sym_tristate_within_range(sym, yes))
+ row[COL_YES] = g_strdup("_");
+ break;
+ case S_INT:
+ case S_HEX:
+ case S_STRING:
+ def = sym_get_string_value(sym);
+ row[COL_VALUE] = g_strdup(def);
+ row[COL_EDIT] = GINT_TO_POINTER(TRUE);
+ row[COL_BTNVIS] = GINT_TO_POINTER(FALSE);
+ break;
+ }
+
+ return row;
+}
+
+
+/* Set the node content with a row of strings */
+static void set_node(GtkTreeIter * node, struct menu *menu, gchar ** row)
+{
+ GdkColor color;
+ gboolean success;
+ GdkPixbuf *pix;
+
+ pix = gdk_pixbuf_new_from_xpm_data((const char **)
+ row[COL_PIXBUF]);
+
+ gdk_color_parse(row[COL_COLOR], &color);
+ gdk_colormap_alloc_colors(gdk_colormap_get_system(), &color, 1,
+ FALSE, FALSE, &success);
+
+ gtk_tree_store_set(tree, node,
+ COL_OPTION, row[COL_OPTION],
+ COL_NAME, row[COL_NAME],
+ COL_NO, row[COL_NO],
+ COL_MOD, row[COL_MOD],
+ COL_YES, row[COL_YES],
+ COL_VALUE, row[COL_VALUE],
+ COL_MENU, (gpointer) menu,
+ COL_COLOR, &color,
+ COL_EDIT, GPOINTER_TO_INT(row[COL_EDIT]),
+ COL_PIXBUF, pix,
+ COL_PIXVIS, GPOINTER_TO_INT(row[COL_PIXVIS]),
+ COL_BTNVIS, GPOINTER_TO_INT(row[COL_BTNVIS]),
+ COL_BTNACT, GPOINTER_TO_INT(row[COL_BTNACT]),
+ COL_BTNINC, GPOINTER_TO_INT(row[COL_BTNINC]),
+ COL_BTNRAD, GPOINTER_TO_INT(row[COL_BTNRAD]),
+ -1);
+
+ g_object_unref(pix);
+}
+
+
+/* Add a node to the tree */
+static void place_node(struct menu *menu, char **row)
+{
+ GtkTreeIter *parent = parents[indent - 1];
+ GtkTreeIter *node = parents[indent];
+
+ gtk_tree_store_append(tree, node, parent);
+ set_node(node, menu, row);
+}
+
+
+/* Find a node in the GTK+ tree */
+static GtkTreeIter found;
+
+/*
+ * Find a menu in the GtkTree starting at parent.
+ */
+GtkTreeIter *gtktree_iter_find_node(GtkTreeIter * parent,
+ struct menu *tofind)
+{
+ GtkTreeIter iter;
+ GtkTreeIter *child = &iter;
+ gboolean valid;
+ GtkTreeIter *ret;
+
+ valid = gtk_tree_model_iter_children(model2, child, parent);
+ while (valid) {
+ struct menu *menu;
+
+ gtk_tree_model_get(model2, child, COL_MENU, &menu, -1);
+
+ if (menu == tofind) {
+ memcpy(&found, child, sizeof(GtkTreeIter));
+ return &found;
+ }
+
+ ret = gtktree_iter_find_node(child, tofind);
+ if (ret)
+ return ret;
+
+ valid = gtk_tree_model_iter_next(model2, child);
+ }
+
+ return NULL;
+}
+
+
+/*
+ * Update the tree by adding/removing entries
+ * Does not change other nodes
+ */
+static void update_tree(struct menu *src, GtkTreeIter * dst)
+{
+ struct menu *child1;
+ GtkTreeIter iter, tmp;
+ GtkTreeIter *child2 = &iter;
+ gboolean valid;
+ GtkTreeIter *sibling;
+ struct symbol *sym;
+ struct property *prop;
+ struct menu *menu1, *menu2;
+
+ if (src == &rootmenu)
+ indent = 1;
+
+ valid = gtk_tree_model_iter_children(model2, child2, dst);
+ for (child1 = src->list; child1; child1 = child1->next) {
+
+ prop = child1->prompt;
+ sym = child1->sym;
+
+ reparse:
+ menu1 = child1;
+ if (valid)
+ gtk_tree_model_get(model2, child2, COL_MENU,
+ &menu2, -1);
+ else
+ menu2 = NULL; // force adding of a first child
+
+#ifdef DEBUG
+ printf("%*c%s | %s\n", indent, ' ',
+ menu1 ? menu_get_prompt(menu1) : "nil",
+ menu2 ? menu_get_prompt(menu2) : "nil");
+#endif
+
+ if (!menu_is_visible(child1) && !show_all) { // remove node
+ if (gtktree_iter_find_node(dst, menu1) != NULL) {
+ memcpy(&tmp, child2, sizeof(GtkTreeIter));
+ valid = gtk_tree_model_iter_next(model2,
+ child2);
+ gtk_tree_store_remove(tree2, &tmp);
+ if (!valid)
+ return; // next parent
+ else
+ goto reparse; // next child
+ } else
+ continue;
+ }
+
+ if (menu1 != menu2) {
+ if (gtktree_iter_find_node(dst, menu1) == NULL) { // add node
+ if (!valid && !menu2)
+ sibling = NULL;
+ else
+ sibling = child2;
+ gtk_tree_store_insert_before(tree2,
+ child2,
+ dst, sibling);
+ set_node(child2, menu1, fill_row(menu1));
+ if (menu2 == NULL)
+ valid = TRUE;
+ } else { // remove node
+ memcpy(&tmp, child2, sizeof(GtkTreeIter));
+ valid = gtk_tree_model_iter_next(model2,
+ child2);
+ gtk_tree_store_remove(tree2, &tmp);
+ if (!valid)
+ return; // next parent
+ else
+ goto reparse; // next child
+ }
+ } else if (sym && (sym->flags & SYMBOL_CHANGED)) {
+ set_node(child2, menu1, fill_row(menu1));
+ }
+
+ indent++;
+ update_tree(child1, child2);
+ indent--;
+
+ valid = gtk_tree_model_iter_next(model2, child2);
+ }
+}
+
+
+/* Display the whole tree (single/split/full view) */
+static void display_tree(struct menu *menu)
+{
+ struct symbol *sym;
+ struct property *prop;
+ struct menu *child;
+ enum prop_type ptype;
+
+ if (menu == &rootmenu) {
+ indent = 1;
+ current = &rootmenu;
+ }
+
+ for (child = menu->list; child; child = child->next) {
+ prop = child->prompt;
+ sym = child->sym;
+ ptype = prop ? prop->type : P_UNKNOWN;
+
+ if (sym)
+ sym->flags &= ~SYMBOL_CHANGED;
+
+ if ((view_mode == SPLIT_VIEW)
+ && !(child->flags & MENU_ROOT) && (tree == tree1))
+ continue;
+
+ if ((view_mode == SPLIT_VIEW) && (child->flags & MENU_ROOT)
+ && (tree == tree2))
+ continue;
+
+ if (menu_is_visible(child) || show_all)
+ place_node(child, fill_row(child));
+#ifdef DEBUG
+ printf("%*c%s: ", indent, ' ', menu_get_prompt(child));
+ printf("%s", child->flags & MENU_ROOT ? "rootmenu | " : "");
+ dbg_print_ptype(ptype);
+ printf(" | ");
+ if (sym) {
+ dbg_print_stype(sym->type);
+ printf(" | ");
+ dbg_print_flags(sym->flags);
+ printf("\n");
+ } else
+ printf("\n");
+#endif
+ if ((view_mode != FULL_VIEW) && (ptype == P_MENU)
+ && (tree == tree2))
+ continue;
+/*
+ if (((menu != &rootmenu) && !(menu->flags & MENU_ROOT))
+ || (view_mode == FULL_VIEW)
+ || (view_mode == SPLIT_VIEW))*/
+ if (((view_mode == SINGLE_VIEW) && (menu->flags & MENU_ROOT))
+ || (view_mode == FULL_VIEW)
+ || (view_mode == SPLIT_VIEW)) {
+ indent++;
+ display_tree(child);
+ indent--;
+ }
+ }
+}
+
+/* Display a part of the tree starting at current node (single/split view) */
+static void display_tree_part(void)
+{
+ if (tree2)
+ gtk_tree_store_clear(tree2);
+ if (view_mode == SINGLE_VIEW)
+ display_tree(current);
+ else if (view_mode == SPLIT_VIEW)
+ display_tree(browsed);
+ gtk_tree_view_expand_all(GTK_TREE_VIEW(tree2_w));
+}
+
+/* Display the list in the left frame (split view) */
+static void display_list(void)
+{
+ if (tree1)
+ gtk_tree_store_clear(tree1);
+
+ tree = tree1;
+ display_tree(&rootmenu);
+ gtk_tree_view_expand_all(GTK_TREE_VIEW(tree1_w));
+ tree = tree2;
+}
+
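+/* Recursively set MENU_ROOT on menu entries so that split view can
+ * tell which entries belong in the left (menu list) pane. */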
+void fixup_rootmenu(struct menu *menu)
+{
+ struct menu *child;
+ static int menu_cnt = 0;
+
+ menu->flags |= MENU_ROOT;
+ for (child = menu->list; child; child = child->next) {
+ if (child->prompt && child->prompt->type == P_MENU) {
+ menu_cnt++;
+ fixup_rootmenu(child);
+ menu_cnt--;
+ } else if (!menu_cnt)
+ fixup_rootmenu(child);
+ }
+}
+
+
+/* Main */
+int main(int ac, char *av[])
+{
+ const char *name;
+ char *env;
+ gchar *glade_file;
+
+#ifndef LKC_DIRECT_LINK
+ kconfig_load();
+#endif
+
+ bindtextdomain(PACKAGE, LOCALEDIR);
+ bind_textdomain_codeset(PACKAGE, "UTF-8");
+ textdomain(PACKAGE);
+
+ /* GTK initialization */
+ gtk_set_locale();
+ gtk_init(&ac, &av);
+ glade_init();
+
+ //add_pixmap_directory (PACKAGE_DATA_DIR "/" PACKAGE "/pixmaps");
+ //add_pixmap_directory (PACKAGE_SOURCE_DIR "/pixmaps");
+
+ /* Determine GUI path */
+ env = getenv(SRCTREE);
+ if (env)
+ glade_file = g_strconcat(env, "/scripts/kconfig/gconf.glade", NULL);
+ else if (av[0][0] == '/')
+ glade_file = g_strconcat(av[0], ".glade", NULL);
+ else
+ glade_file = g_strconcat(g_get_current_dir(), "/", av[0], ".glade", NULL);
+
+ /* Load the interface and connect signals */
+ init_main_window(glade_file);
+ init_tree_model();
+ init_left_tree();
+ init_right_tree();
+
+ /* Configuration handling */
+ if (ac > 1 && av[1][0] == '-') {
+ switch (av[1][1]) {
+ case 'a':
+ //showAll = 1;
+ break;
+ case 'h':
+ case '?':
+ printf("%s <config>\n", av[0]);
+ exit(0);
+ }
+ name = av[2];
+ } else
+ name = av[1];
+
+ conf_parse(name);
+ fixup_rootmenu(&rootmenu);
+ conf_read(NULL);
+
+ switch (view_mode) {
+ case SINGLE_VIEW:
+ display_tree_part();
+ break;
+ case SPLIT_VIEW:
+ display_list();
+ break;
+ case FULL_VIEW:
+ display_tree(&rootmenu);
+ break;
+ }
+
+ gtk_main();
+
+ return 0;
+}
--- /dev/null
+<?xml version="1.0" standalone="no"?> <!--*- mode: xml -*-->
+<!DOCTYPE glade-interface SYSTEM "http://glade.gnome.org/glade-2.0.dtd">
+
+<glade-interface>
+
+<widget class="GtkWindow" id="window1">
+ <property name="visible">True</property>
+ <property name="title" translatable="yes">Gtk Kernel Configurator</property>
+ <property name="type">GTK_WINDOW_TOPLEVEL</property>
+ <property name="window_position">GTK_WIN_POS_NONE</property>
+ <property name="modal">False</property>
+ <property name="default_width">640</property>
+ <property name="default_height">480</property>
+ <property name="resizable">True</property>
+ <property name="destroy_with_parent">False</property>
+ <property name="decorated">True</property>
+ <property name="skip_taskbar_hint">False</property>
+ <property name="skip_pager_hint">False</property>
+ <property name="type_hint">GDK_WINDOW_TYPE_HINT_NORMAL</property>
+ <property name="gravity">GDK_GRAVITY_NORTH_WEST</property>
+ <signal name="destroy" handler="on_window1_destroy" object="window1"/>
+ <signal name="size_request" handler="on_window1_size_request" object="vpaned1" last_modification_time="Fri, 11 Jan 2002 16:17:11 GMT"/>
+ <signal name="delete_event" handler="on_window1_delete_event" object="window1" last_modification_time="Sun, 09 Mar 2003 19:42:46 GMT"/>
+
+ <child>
+ <widget class="GtkVBox" id="vbox1">
+ <property name="visible">True</property>
+ <property name="homogeneous">False</property>
+ <property name="spacing">0</property>
+
+ <child>
+ <widget class="GtkMenuBar" id="menubar1">
+ <property name="visible">True</property>
+
+ <child>
+ <widget class="GtkMenuItem" id="file1">
+ <property name="visible">True</property>
+ <property name="label" translatable="yes">_File</property>
+ <property name="use_underline">True</property>
+
+ <child>
+ <widget class="GtkMenu" id="file1_menu">
+
+ <child>
+ <widget class="GtkImageMenuItem" id="load1">
+ <property name="visible">True</property>
+ <property name="tooltip" translatable="yes">Load a config file</property>
+ <property name="label" translatable="yes">_Load</property>
+ <property name="use_underline">True</property>
+ <signal name="activate" handler="on_load1_activate"/>
+ <accelerator key="L" modifiers="GDK_CONTROL_MASK" signal="activate"/>
+
+ <child internal-child="image">
+ <widget class="GtkImage" id="image39">
+ <property name="visible">True</property>
+ <property name="stock">gtk-open</property>
+ <property name="icon_size">1</property>
+ <property name="xalign">0.5</property>
+ <property name="yalign">0.5</property>
+ <property name="xpad">0</property>
+ <property name="ypad">0</property>
+ </widget>
+ </child>
+ </widget>
+ </child>
+
+ <child>
+ <widget class="GtkImageMenuItem" id="save1">
+ <property name="visible">True</property>
+ <property name="tooltip" translatable="yes">Save the config in .config</property>
+ <property name="label" translatable="yes">_Save</property>
+ <property name="use_underline">True</property>
+ <signal name="activate" handler="on_save1_activate"/>
+ <accelerator key="S" modifiers="GDK_CONTROL_MASK" signal="activate"/>
+
+ <child internal-child="image">
+ <widget class="GtkImage" id="image40">
+ <property name="visible">True</property>
+ <property name="stock">gtk-save</property>
+ <property name="icon_size">1</property>
+ <property name="xalign">0.5</property>
+ <property name="yalign">0.5</property>
+ <property name="xpad">0</property>
+ <property name="ypad">0</property>
+ </widget>
+ </child>
+ </widget>
+ </child>
+
+ <child>
+ <widget class="GtkImageMenuItem" id="save_as1">
+ <property name="visible">True</property>
+ <property name="tooltip" translatable="yes">Save the config in a file</property>
+ <property name="label" translatable="yes">Save _as</property>
+ <property name="use_underline">True</property>
+ <signal name="activate" handler="on_save_as1_activate"/>
+
+ <child internal-child="image">
+ <widget class="GtkImage" id="image41">
+ <property name="visible">True</property>
+ <property name="stock">gtk-save-as</property>
+ <property name="icon_size">1</property>
+ <property name="xalign">0.5</property>
+ <property name="yalign">0.5</property>
+ <property name="xpad">0</property>
+ <property name="ypad">0</property>
+ </widget>
+ </child>
+ </widget>
+ </child>
+
+ <child>
+ <widget class="GtkSeparatorMenuItem" id="separator1">
+ <property name="visible">True</property>
+ </widget>
+ </child>
+
+ <child>
+ <widget class="GtkImageMenuItem" id="quit1">
+ <property name="visible">True</property>
+ <property name="label" translatable="yes">_Quit</property>
+ <property name="use_underline">True</property>
+ <signal name="activate" handler="on_quit1_activate"/>
+ <accelerator key="Q" modifiers="GDK_CONTROL_MASK" signal="activate"/>
+
+ <child internal-child="image">
+ <widget class="GtkImage" id="image42">
+ <property name="visible">True</property>
+ <property name="stock">gtk-quit</property>
+ <property name="icon_size">1</property>
+ <property name="xalign">0.5</property>
+ <property name="yalign">0.5</property>
+ <property name="xpad">0</property>
+ <property name="ypad">0</property>
+ </widget>
+ </child>
+ </widget>
+ </child>
+ </widget>
+ </child>
+ </widget>
+ </child>
+
+ <child>
+ <widget class="GtkMenuItem" id="options1">
+ <property name="visible">True</property>
+ <property name="label" translatable="yes">_Options</property>
+ <property name="use_underline">True</property>
+
+ <child>
+ <widget class="GtkMenu" id="options1_menu">
+
+ <child>
+ <widget class="GtkCheckMenuItem" id="show_name1">
+ <property name="visible">True</property>
+ <property name="tooltip" translatable="yes">Show name</property>
+ <property name="label" translatable="yes">Show _name</property>
+ <property name="use_underline">True</property>
+ <property name="active">False</property>
+ <signal name="activate" handler="on_show_name1_activate"/>
+ </widget>
+ </child>
+
+ <child>
+ <widget class="GtkCheckMenuItem" id="show_range1">
+ <property name="visible">True</property>
+ <property name="tooltip" translatable="yes">Show range (Y/M/N)</property>
+ <property name="label" translatable="yes">Show _range</property>
+ <property name="use_underline">True</property>
+ <property name="active">False</property>
+ <signal name="activate" handler="on_show_range1_activate"/>
+ </widget>
+ </child>
+
+ <child>
+ <widget class="GtkCheckMenuItem" id="show_data1">
+ <property name="visible">True</property>
+ <property name="tooltip" translatable="yes">Show value of the option</property>
+ <property name="label" translatable="yes">Show _data</property>
+ <property name="use_underline">True</property>
+ <property name="active">False</property>
+ <signal name="activate" handler="on_show_data1_activate"/>
+ </widget>
+ </child>
+
+ <child>
+ <widget class="GtkSeparatorMenuItem" id="separator2">
+ <property name="visible">True</property>
+ </widget>
+ </child>
+
+ <child>
+ <widget class="GtkCheckMenuItem" id="show_all_options1">
+ <property name="visible">True</property>
+ <property name="tooltip" translatable="yes">Show all options</property>
+ <property name="label" translatable="yes">Show all _options</property>
+ <property name="use_underline">True</property>
+ <property name="active">False</property>
+ <signal name="activate" handler="on_show_all_options1_activate"/>
+ </widget>
+ </child>
+
+ <child>
+ <widget class="GtkCheckMenuItem" id="show_debug_info1">
+ <property name="visible">True</property>
+ <property name="tooltip" translatable="yes">Show masked options</property>
+ <property name="label" translatable="yes">Show _debug info</property>
+ <property name="use_underline">True</property>
+ <property name="active">False</property>
+ <signal name="activate" handler="on_show_debug_info1_activate"/>
+ </widget>
+ </child>
+ </widget>
+ </child>
+ </widget>
+ </child>
+
+ <child>
+ <widget class="GtkMenuItem" id="help1">
+ <property name="visible">True</property>
+ <property name="label" translatable="yes">_Help</property>
+ <property name="use_underline">True</property>
+
+ <child>
+ <widget class="GtkMenu" id="help1_menu">
+
+ <child>
+ <widget class="GtkImageMenuItem" id="introduction1">
+ <property name="visible">True</property>
+ <property name="label" translatable="yes">_Introduction</property>
+ <property name="use_underline">True</property>
+ <signal name="activate" handler="on_introduction1_activate" last_modification_time="Fri, 15 Nov 2002 20:26:30 GMT"/>
+ <accelerator key="I" modifiers="GDK_CONTROL_MASK" signal="activate"/>
+
+ <child internal-child="image">
+ <widget class="GtkImage" id="image43">
+ <property name="visible">True</property>
+ <property name="stock">gtk-dialog-question</property>
+ <property name="icon_size">1</property>
+ <property name="xalign">0.5</property>
+ <property name="yalign">0.5</property>
+ <property name="xpad">0</property>
+ <property name="ypad">0</property>
+ </widget>
+ </child>
+ </widget>
+ </child>
+
+ <child>
+ <widget class="GtkImageMenuItem" id="about1">
+ <property name="visible">True</property>
+ <property name="label" translatable="yes">_About</property>
+ <property name="use_underline">True</property>
+ <signal name="activate" handler="on_about1_activate" last_modification_time="Fri, 15 Nov 2002 20:26:30 GMT"/>
+ <accelerator key="A" modifiers="GDK_CONTROL_MASK" signal="activate"/>
+
+ <child internal-child="image">
+ <widget class="GtkImage" id="image44">
+ <property name="visible">True</property>
+ <property name="stock">gtk-properties</property>
+ <property name="icon_size">1</property>
+ <property name="xalign">0.5</property>
+ <property name="yalign">0.5</property>
+ <property name="xpad">0</property>
+ <property name="ypad">0</property>
+ </widget>
+ </child>
+ </widget>
+ </child>
+
+ <child>
+ <widget class="GtkImageMenuItem" id="license1">
+ <property name="visible">True</property>
+ <property name="label" translatable="yes">_License</property>
+ <property name="use_underline">True</property>
+ <signal name="activate" handler="on_license1_activate" last_modification_time="Fri, 15 Nov 2002 20:26:30 GMT"/>
+
+ <child internal-child="image">
+ <widget class="GtkImage" id="image45">
+ <property name="visible">True</property>
+ <property name="stock">gtk-justify-fill</property>
+ <property name="icon_size">1</property>
+ <property name="xalign">0.5</property>
+ <property name="yalign">0.5</property>
+ <property name="xpad">0</property>
+ <property name="ypad">0</property>
+ </widget>
+ </child>
+ </widget>
+ </child>
+ </widget>
+ </child>
+ </widget>
+ </child>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkHandleBox" id="handlebox1">
+ <property name="visible">True</property>
+ <property name="shadow_type">GTK_SHADOW_OUT</property>
+ <property name="handle_position">GTK_POS_LEFT</property>
+ <property name="snap_edge">GTK_POS_TOP</property>
+
+ <child>
+ <widget class="GtkToolbar" id="toolbar1">
+ <property name="visible">True</property>
+ <property name="orientation">GTK_ORIENTATION_HORIZONTAL</property>
+ <property name="toolbar_style">GTK_TOOLBAR_BOTH</property>
+ <property name="tooltips">True</property>
+ <property name="show_arrow">True</property>
+
+ <child>
+ <widget class="GtkToolButton" id="button1">
+ <property name="visible">True</property>
+ <property name="tooltip" translatable="yes">Goes up of one level (single view)</property>
+ <property name="label" translatable="yes">Back</property>
+ <property name="use_underline">True</property>
+ <property name="stock_id">gtk-undo</property>
+ <property name="visible_horizontal">True</property>
+ <property name="visible_vertical">True</property>
+ <property name="is_important">False</property>
+ <signal name="clicked" handler="on_back_clicked"/>
+ </widget>
+ <packing>
+ <property name="expand">False</property>
+ <property name="homogeneous">True</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkToolItem" id="toolitem1">
+ <property name="visible">True</property>
+ <property name="visible_horizontal">True</property>
+ <property name="visible_vertical">True</property>
+ <property name="is_important">False</property>
+
+ <child>
+ <widget class="GtkVSeparator" id="vseparator1">
+ <property name="visible">True</property>
+ </widget>
+ </child>
+ </widget>
+ <packing>
+ <property name="expand">False</property>
+ <property name="homogeneous">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkToolButton" id="button2">
+ <property name="visible">True</property>
+ <property name="tooltip" translatable="yes">Load a config file</property>
+ <property name="label" translatable="yes">Load</property>
+ <property name="use_underline">True</property>
+ <property name="stock_id">gtk-open</property>
+ <property name="visible_horizontal">True</property>
+ <property name="visible_vertical">True</property>
+ <property name="is_important">False</property>
+ <signal name="clicked" handler="on_load_clicked"/>
+ </widget>
+ <packing>
+ <property name="expand">False</property>
+ <property name="homogeneous">True</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkToolButton" id="button3">
+ <property name="visible">True</property>
+ <property name="tooltip" translatable="yes">Save a config file</property>
+ <property name="label" translatable="yes">Save</property>
+ <property name="use_underline">True</property>
+ <property name="stock_id">gtk-save</property>
+ <property name="visible_horizontal">True</property>
+ <property name="visible_vertical">True</property>
+ <property name="is_important">False</property>
+ <signal name="clicked" handler="on_save_clicked"/>
+ </widget>
+ <packing>
+ <property name="expand">False</property>
+ <property name="homogeneous">True</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkToolItem" id="toolitem2">
+ <property name="visible">True</property>
+ <property name="visible_horizontal">True</property>
+ <property name="visible_vertical">True</property>
+ <property name="is_important">False</property>
+
+ <child>
+ <widget class="GtkVSeparator" id="vseparator2">
+ <property name="visible">True</property>
+ </widget>
+ </child>
+ </widget>
+ <packing>
+ <property name="expand">False</property>
+ <property name="homogeneous">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkToolButton" id="button4">
+ <property name="visible">True</property>
+ <property name="tooltip" translatable="yes">Single view</property>
+ <property name="label" translatable="yes">Single</property>
+ <property name="use_underline">True</property>
+ <property name="stock_id">gtk-missing-image</property>
+ <property name="visible_horizontal">True</property>
+ <property name="visible_vertical">True</property>
+ <property name="is_important">False</property>
+ <signal name="clicked" handler="on_single_clicked" last_modification_time="Sun, 12 Jan 2003 14:28:39 GMT"/>
+ </widget>
+ <packing>
+ <property name="expand">False</property>
+ <property name="homogeneous">True</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkToolButton" id="button5">
+ <property name="visible">True</property>
+ <property name="tooltip" translatable="yes">Split view</property>
+ <property name="label" translatable="yes">Split</property>
+ <property name="use_underline">True</property>
+ <property name="stock_id">gtk-missing-image</property>
+ <property name="visible_horizontal">True</property>
+ <property name="visible_vertical">True</property>
+ <property name="is_important">False</property>
+ <signal name="clicked" handler="on_split_clicked" last_modification_time="Sun, 12 Jan 2003 14:28:45 GMT"/>
+ </widget>
+ <packing>
+ <property name="expand">False</property>
+ <property name="homogeneous">True</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkToolButton" id="button6">
+ <property name="visible">True</property>
+ <property name="tooltip" translatable="yes">Full view</property>
+ <property name="label" translatable="yes">Full</property>
+ <property name="use_underline">True</property>
+ <property name="stock_id">gtk-missing-image</property>
+ <property name="visible_horizontal">True</property>
+ <property name="visible_vertical">True</property>
+ <property name="is_important">False</property>
+ <signal name="clicked" handler="on_full_clicked" last_modification_time="Sun, 12 Jan 2003 14:28:50 GMT"/>
+ </widget>
+ <packing>
+ <property name="expand">False</property>
+ <property name="homogeneous">True</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkToolItem" id="toolitem3">
+ <property name="visible">True</property>
+ <property name="visible_horizontal">True</property>
+ <property name="visible_vertical">True</property>
+ <property name="is_important">False</property>
+
+ <child>
+ <widget class="GtkVSeparator" id="vseparator3">
+ <property name="visible">True</property>
+ </widget>
+ </child>
+ </widget>
+ <packing>
+ <property name="expand">False</property>
+ <property name="homogeneous">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkToolButton" id="button7">
+ <property name="visible">True</property>
+ <property name="tooltip" translatable="yes">Collapse the whole tree in the right frame</property>
+ <property name="label" translatable="yes">Collapse</property>
+ <property name="use_underline">True</property>
+ <property name="stock_id">gtk-remove</property>
+ <property name="visible_horizontal">True</property>
+ <property name="visible_vertical">True</property>
+ <property name="is_important">False</property>
+ <signal name="clicked" handler="on_collapse_clicked"/>
+ </widget>
+ <packing>
+ <property name="expand">False</property>
+ <property name="homogeneous">True</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkToolButton" id="button8">
+ <property name="visible">True</property>
+ <property name="tooltip" translatable="yes">Expand the whole tree in the right frame</property>
+ <property name="label" translatable="yes">Expand</property>
+ <property name="use_underline">True</property>
+ <property name="stock_id">gtk-add</property>
+ <property name="visible_horizontal">True</property>
+ <property name="visible_vertical">True</property>
+ <property name="is_important">False</property>
+ <signal name="clicked" handler="on_expand_clicked"/>
+ </widget>
+ <packing>
+ <property name="expand">False</property>
+ <property name="homogeneous">True</property>
+ </packing>
+ </child>
+ </widget>
+ </child>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">False</property>
+ <property name="fill">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkHPaned" id="hpaned1">
+ <property name="width_request">1</property>
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="position">0</property>
+
+ <child>
+ <widget class="GtkScrolledWindow" id="scrolledwindow1">
+ <property name="visible">True</property>
+ <property name="hscrollbar_policy">GTK_POLICY_AUTOMATIC</property>
+ <property name="vscrollbar_policy">GTK_POLICY_AUTOMATIC</property>
+ <property name="shadow_type">GTK_SHADOW_IN</property>
+ <property name="window_placement">GTK_CORNER_TOP_LEFT</property>
+
+ <child>
+ <widget class="GtkTreeView" id="treeview1">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="headers_visible">True</property>
+ <property name="rules_hint">False</property>
+ <property name="reorderable">False</property>
+ <property name="enable_search">True</property>
+ <signal name="cursor_changed" handler="on_treeview2_cursor_changed" last_modification_time="Sun, 12 Jan 2003 15:58:22 GMT"/>
+ <signal name="button_press_event" handler="on_treeview1_button_press_event" last_modification_time="Sun, 12 Jan 2003 16:03:52 GMT"/>
+ <signal name="key_press_event" handler="on_treeview2_key_press_event" last_modification_time="Sun, 12 Jan 2003 16:11:44 GMT"/>
+ </widget>
+ </child>
+ </widget>
+ <packing>
+ <property name="shrink">True</property>
+ <property name="resize">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkVPaned" id="vpaned1">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="position">0</property>
+
+ <child>
+ <widget class="GtkScrolledWindow" id="scrolledwindow2">
+ <property name="visible">True</property>
+ <property name="hscrollbar_policy">GTK_POLICY_AUTOMATIC</property>
+ <property name="vscrollbar_policy">GTK_POLICY_AUTOMATIC</property>
+ <property name="shadow_type">GTK_SHADOW_IN</property>
+ <property name="window_placement">GTK_CORNER_TOP_LEFT</property>
+
+ <child>
+ <widget class="GtkTreeView" id="treeview2">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="has_focus">True</property>
+ <property name="headers_visible">True</property>
+ <property name="rules_hint">False</property>
+ <property name="reorderable">False</property>
+ <property name="enable_search">True</property>
+ <signal name="cursor_changed" handler="on_treeview2_cursor_changed" last_modification_time="Sun, 12 Jan 2003 15:57:55 GMT"/>
+ <signal name="button_press_event" handler="on_treeview2_button_press_event" last_modification_time="Sun, 12 Jan 2003 15:57:58 GMT"/>
+ <signal name="key_press_event" handler="on_treeview2_key_press_event" last_modification_time="Sun, 12 Jan 2003 15:58:01 GMT"/>
+ </widget>
+ </child>
+ </widget>
+ <packing>
+ <property name="shrink">True</property>
+ <property name="resize">False</property>
+ </packing>
+ </child>
+
+ <child>
+ <widget class="GtkScrolledWindow" id="scrolledwindow3">
+ <property name="visible">True</property>
+ <property name="hscrollbar_policy">GTK_POLICY_NEVER</property>
+ <property name="vscrollbar_policy">GTK_POLICY_AUTOMATIC</property>
+ <property name="shadow_type">GTK_SHADOW_IN</property>
+ <property name="window_placement">GTK_CORNER_TOP_LEFT</property>
+
+ <child>
+ <widget class="GtkTextView" id="textview3">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="editable">False</property>
+ <property name="overwrite">False</property>
+ <property name="accepts_tab">True</property>
+ <property name="justification">GTK_JUSTIFY_LEFT</property>
+ <property name="wrap_mode">GTK_WRAP_WORD</property>
+ <property name="cursor_visible">True</property>
+ <property name="pixels_above_lines">0</property>
+ <property name="pixels_below_lines">0</property>
+ <property name="pixels_inside_wrap">0</property>
+ <property name="left_margin">0</property>
+ <property name="right_margin">0</property>
+ <property name="indent">0</property>
+ <property name="text" translatable="yes">Sorry, no help available for this option yet.</property>
+ </widget>
+ </child>
+ </widget>
+ <packing>
+ <property name="shrink">True</property>
+ <property name="resize">True</property>
+ </packing>
+ </child>
+ </widget>
+ <packing>
+ <property name="shrink">True</property>
+ <property name="resize">True</property>
+ </packing>
+ </child>
+ </widget>
+ <packing>
+ <property name="padding">0</property>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ </packing>
+ </child>
+ </widget>
+ </child>
+</widget>
+
+</glade-interface>
--- /dev/null
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+static const char *xpm_load[] = {
+"22 22 5 1",
+". c None",
+"# c #000000",
+"c c #838100",
+"a c #ffff00",
+"b c #ffffff",
+"......................",
+"......................",
+"......................",
+"............####....#.",
+"...........#....##.##.",
+"..................###.",
+".................####.",
+".####...........#####.",
+"#abab##########.......",
+"#babababababab#.......",
+"#ababababababa#.......",
+"#babababababab#.......",
+"#ababab###############",
+"#babab##cccccccccccc##",
+"#abab##cccccccccccc##.",
+"#bab##cccccccccccc##..",
+"#ab##cccccccccccc##...",
+"#b##cccccccccccc##....",
+"###cccccccccccc##.....",
+"##cccccccccccc##......",
+"###############.......",
+"......................"};
+
+static const char *xpm_save[] = {
+"22 22 5 1",
+". c None",
+"# c #000000",
+"a c #838100",
+"b c #c5c2c5",
+"c c #cdb6d5",
+"......................",
+".####################.",
+".#aa#bbbbbbbbbbbb#bb#.",
+".#aa#bbbbbbbbbbbb#bb#.",
+".#aa#bbbbbbbbbcbb####.",
+".#aa#bbbccbbbbbbb#aa#.",
+".#aa#bbbccbbbbbbb#aa#.",
+".#aa#bbbbbbbbbbbb#aa#.",
+".#aa#bbbbbbbbbbbb#aa#.",
+".#aa#bbbbbbbbbbbb#aa#.",
+".#aa#bbbbbbbbbbbb#aa#.",
+".#aaa############aaa#.",
+".#aaaaaaaaaaaaaaaaaa#.",
+".#aaaaaaaaaaaaaaaaaa#.",
+".#aaa#############aa#.",
+".#aaa#########bbb#aa#.",
+".#aaa#########bbb#aa#.",
+".#aaa#########bbb#aa#.",
+".#aaa#########bbb#aa#.",
+".#aaa#########bbb#aa#.",
+"..##################..",
+"......................"};
+
+static const char *xpm_back[] = {
+"22 22 3 1",
+". c None",
+"# c #000083",
+"a c #838183",
+"......................",
+"......................",
+"......................",
+"......................",
+"......................",
+"...........######a....",
+"..#......##########...",
+"..##...####......##a..",
+"..###.###.........##..",
+"..######..........##..",
+"..#####...........##..",
+"..######..........##..",
+"..#######.........##..",
+"..########.......##a..",
+"...............a###...",
+"...............###....",
+"......................",
+"......................",
+"......................",
+"......................",
+"......................",
+"......................"};
+
+static const char *xpm_tree_view[] = {
+"22 22 2 1",
+". c None",
+"# c #000000",
+"......................",
+"......................",
+"......#...............",
+"......#...............",
+"......#...............",
+"......#...............",
+"......#...............",
+"......########........",
+"......#...............",
+"......#...............",
+"......#...............",
+"......#...............",
+"......#...............",
+"......########........",
+"......#...............",
+"......#...............",
+"......#...............",
+"......#...............",
+"......#...............",
+"......########........",
+"......................",
+"......................"};
+
+static const char *xpm_single_view[] = {
+"22 22 2 1",
+". c None",
+"# c #000000",
+"......................",
+"......................",
+"..........#...........",
+"..........#...........",
+"..........#...........",
+"..........#...........",
+"..........#...........",
+"..........#...........",
+"..........#...........",
+"..........#...........",
+"..........#...........",
+"..........#...........",
+"..........#...........",
+"..........#...........",
+"..........#...........",
+"..........#...........",
+"..........#...........",
+"..........#...........",
+"..........#...........",
+"..........#...........",
+"......................",
+"......................"};
+
+static const char *xpm_split_view[] = {
+"22 22 2 1",
+". c None",
+"# c #000000",
+"......................",
+"......................",
+"......#......#........",
+"......#......#........",
+"......#......#........",
+"......#......#........",
+"......#......#........",
+"......#......#........",
+"......#......#........",
+"......#......#........",
+"......#......#........",
+"......#......#........",
+"......#......#........",
+"......#......#........",
+"......#......#........",
+"......#......#........",
+"......#......#........",
+"......#......#........",
+"......#......#........",
+"......#......#........",
+"......................",
+"......................"};
+
+static const char *xpm_symbol_no[] = {
+"12 12 2 1",
+" c white",
+". c black",
+" ",
+" .......... ",
+" . . ",
+" . . ",
+" . . ",
+" . . ",
+" . . ",
+" . . ",
+" . . ",
+" . . ",
+" .......... ",
+" "};
+
+static const char *xpm_symbol_mod[] = {
+"12 12 2 1",
+" c white",
+". c black",
+" ",
+" .......... ",
+" . . ",
+" . . ",
+" . .. . ",
+" . .... . ",
+" . .... . ",
+" . .. . ",
+" . . ",
+" . . ",
+" .......... ",
+" "};
+
+static const char *xpm_symbol_yes[] = {
+"12 12 2 1",
+" c white",
+". c black",
+" ",
+" .......... ",
+" . . ",
+" . . ",
+" . . . ",
+" . .. . ",
+" . . .. . ",
+" . .... . ",
+" . .. . ",
+" . . ",
+" .......... ",
+" "};
+
+static const char *xpm_choice_no[] = {
+"12 12 2 1",
+" c white",
+". c black",
+" ",
+" .... ",
+" .. .. ",
+" . . ",
+" . . ",
+" . . ",
+" . . ",
+" . . ",
+" . . ",
+" .. .. ",
+" .... ",
+" "};
+
+static const char *xpm_choice_yes[] = {
+"12 12 2 1",
+" c white",
+". c black",
+" ",
+" .... ",
+" .. .. ",
+" . . ",
+" . .. . ",
+" . .... . ",
+" . .... . ",
+" . .. . ",
+" . . ",
+" .. .. ",
+" .... ",
+" "};
+
+static const char *xpm_menu[] = {
+"12 12 2 1",
+" c white",
+". c black",
+" ",
+" .......... ",
+" . . ",
+" . .. . ",
+" . .... . ",
+" . ...... . ",
+" . ...... . ",
+" . .... . ",
+" . .. . ",
+" . . ",
+" .......... ",
+" "};
+
+static const char *xpm_menu_inv[] = {
+"12 12 2 1",
+" c white",
+". c black",
+" ",
+" .......... ",
+" .......... ",
+" .. ...... ",
+" .. .... ",
+" .. .. ",
+" .. .. ",
+" .. .... ",
+" .. ...... ",
+" .......... ",
+" .......... ",
+" "};
+
+static const char *xpm_menuback[] = {
+"12 12 2 1",
+" c white",
+". c black",
+" ",
+" .......... ",
+" . . ",
+" . .. . ",
+" . .... . ",
+" . ...... . ",
+" . ...... . ",
+" . .... . ",
+" . .. . ",
+" . . ",
+" .......... ",
+" "};
+
+static const char *xpm_void[] = {
+"12 12 2 1",
+" c white",
+". c black",
+" ",
+" ",
+" ",
+" ",
+" ",
+" ",
+" ",
+" ",
+" ",
+" ",
+" ",
+" "};
--- /dev/null
+#include <dlfcn.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "lkc.h"
+
+#define P(name,type,arg) type (*name ## _p) arg
+#include "lkc_proto.h"
+#undef P
+
+void kconfig_load(void)
+{
+ void *handle;
+ char *error;
+
+ handle = dlopen("./libkconfig.so", RTLD_LAZY);
+ if (!handle) {
+ handle = dlopen("./scripts/kconfig/libkconfig.so", RTLD_LAZY);
+ if (!handle) {
+ fprintf(stderr, "%s\n", dlerror());
+ exit(1);
+ }
+ }
+
+#define P(name,type,arg) \
+{ \
+ name ## _p = dlsym(handle, #name); \
+ if ((error = dlerror())) { \
+ fprintf(stderr, "%s\n", error); \
+ exit(1); \
+ } \
+}
+#include "lkc_proto.h"
+#undef P
+}
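kconfig_load() relies on an X-macro: lkc_proto.h is a list of P(name, type, arg) entries, so including it once with P defined as a pointer declaration produces one name_p function pointer per prototype, and including it again inside the function turns every entry into a dlsym() lookup on the shared library. A minimal self-contained sketch of the same idiom, with a made-up PROTO_LIST and sym_lookup standing in for the real lkc_proto.h contents (error handling omitted):

#include <dlfcn.h>

struct symbol;                                  /* opaque, illustration only */

#define PROTO_LIST \
	P(sym_lookup, struct symbol *, (const char *name, int flags))

/* First expansion: declare one function pointer per listed prototype. */
#define P(name, type, arg) static type (*name ## _p) arg;
PROTO_LIST
#undef P

/* Second expansion: resolve every pointer from an already-opened handle. */
static void load_all(void *handle)
{
#define P(name, type, arg) name ## _p = (type (*) arg) dlsym(handle, #name);
	PROTO_LIST
#undef P
}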
--- /dev/null
+/*
+ * Arnaldo Carvalho de Melo <acme@conectiva.com.br>, 2005
+ *
+ * Released under the terms of the GNU GPL v2.0
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#define LKC_DIRECT_LINK
+#include "lkc.h"
+
+static char *escape(const char* text, char *bf, int len)
+{
+ char *bfp = bf;
+ int multiline = strchr(text, '\n') != NULL;
+ int eol = 0;
+ int textlen = strlen(text);
+
+ if ((textlen > 0) && (text[textlen-1] == '\n'))
+ eol = 1;
+
+ *bfp++ = '"';
+ --len;
+
+ if (multiline) {
+ *bfp++ = '"';
+ *bfp++ = '\n';
+ *bfp++ = '"';
+ len -= 3;
+ }
+
+ while (*text != '\0' && len > 1) {
+ if (*text == '"')
+ *bfp++ = '\\';
+ else if (*text == '\n') {
+ *bfp++ = '\\';
+ *bfp++ = 'n';
+ *bfp++ = '"';
+ *bfp++ = '\n';
+ *bfp++ = '"';
+ len -= 5;
+ ++text;
+ goto next;
+ }
+ *bfp++ = *text++;
+next:
+ --len;
+ }
+
+ if (multiline && eol)
+ bfp -= 3;
+
+ *bfp++ = '"';
+ *bfp = '\0';
+
+ return bf;
+}
+
+struct file_line {
+ struct file_line *next;
+ char* file;
+ int lineno;
+};
+
+static struct file_line *file_line__new(char *file, int lineno)
+{
+ struct file_line *self = malloc(sizeof(*self));
+
+ if (self == NULL)
+ goto out;
+
+ self->file = file;
+ self->lineno = lineno;
+ self->next = NULL;
+out:
+ return self;
+}
+
+struct message {
+ const char *msg;
+ const char *option;
+ struct message *next;
+ struct file_line *files;
+};
+
+static struct message *message__list;
+
+static struct message *message__new(const char *msg, char *option, char *file, int lineno)
+{
+ struct message *self = malloc(sizeof(*self));
+
+ if (self == NULL)
+ goto out;
+
+ self->files = file_line__new(file, lineno);
+ if (self->files == NULL)
+ goto out_fail;
+
+ self->msg = strdup(msg);
+ if (self->msg == NULL)
+ goto out_fail_msg;
+
+ self->option = option;
+ self->next = NULL;
+out:
+ return self;
+out_fail_msg:
+ free(self->files);
+out_fail:
+ free(self);
+ self = NULL;
+ goto out;
+}
+
+static struct message *message__find(const char *msg)
+{
+ struct message *m = message__list;
+
+ while (m != NULL) {
+ if (strcmp(m->msg, msg) == 0)
+ break;
+ m = m->next;
+ }
+
+ return m;
+}
+
+static int message__add_file_line(struct message *self, char *file, int lineno)
+{
+ int rc = -1;
+ struct file_line *fl = file_line__new(file, lineno);
+
+ if (fl == NULL)
+ goto out;
+
+ fl->next = self->files;
+ self->files = fl;
+ rc = 0;
+out:
+ return rc;
+}
+
+static int message__add(const char *msg, char *option, char *file, int lineno)
+{
+ int rc = 0;
+ char bf[16384];
+ char *escaped = escape(msg, bf, sizeof(bf));
+ struct message *m = message__find(escaped);
+
+ if (m != NULL)
+ rc = message__add_file_line(m, file, lineno);
+ else {
+ m = message__new(escaped, option, file, lineno);
+
+ if (m != NULL) {
+ m->next = message__list;
+ message__list = m;
+ } else
+ rc = -1;
+ }
+ return rc;
+}
+
+void menu_build_message_list(struct menu *menu)
+{
+ struct menu *child;
+
+ message__add(menu_get_prompt(menu), NULL,
+ menu->file == NULL ? "Root Menu" : menu->file->name,
+ menu->lineno);
+
+ if (menu->sym != NULL && menu->sym->help != NULL)
+ message__add(menu->sym->help, menu->sym->name,
+ menu->file == NULL ? "Root Menu" : menu->file->name,
+ menu->lineno);
+
+ for (child = menu->list; child != NULL; child = child->next)
+ if (child->prompt != NULL)
+ menu_build_message_list(child);
+}
+
+static void message__print_file_lineno(struct message *self)
+{
+ struct file_line *fl = self->files;
+
+ putchar('\n');
+ if (self->option != NULL)
+ printf("# %s:00000\n", self->option);
+
+ printf("#: %s:%d", fl->file, fl->lineno);
+ fl = fl->next;
+
+ while (fl != NULL) {
+ printf(", %s:%d", fl->file, fl->lineno);
+ fl = fl->next;
+ }
+
+ putchar('\n');
+}
+
+static void message__print_gettext_msgid_msgstr(struct message *self)
+{
+ message__print_file_lineno(self);
+
+ printf("msgid %s\n"
+ "msgstr \"\"\n", self->msg);
+}
+
+void menu__xgettext(void)
+{
+ struct message *m = message__list;
+
+ while (m != NULL) {
+ message__print_gettext_msgid_msgstr(m);
+ m = m->next;
+ }
+}
+
+int main(int ac, char **av)
+{
+ conf_parse(av[1]);
+
+ menu_build_message_list(menu_get_root_menu(NULL));
+ menu__xgettext();
+ return 0;
+}
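This tool parses a Kconfig tree, collects every prompt and help text, escapes them into PO-style msgid strings (splitting multi-line help across quoted lines), and records each file:line where a message appears. For a hypothetical symbol FOO defined in arch/Kconfig at line 42 with a two-line help text, the printing path above would emit a fragment of roughly this shape (symbol name, file, and line number are invented for illustration):

# FOO:00000
#: arch/Kconfig:42
msgid ""
"First help line.\n"
"Second help line.\n"
msgstr ""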
--- /dev/null
+
+#line 3 "scripts/kconfig/lex.zconf.c"
+
+#define YY_INT_ALIGNED short int
+
+/* A lexical scanner generated by flex */
+
+#define FLEX_SCANNER
+#define YY_FLEX_MAJOR_VERSION 2
+#define YY_FLEX_MINOR_VERSION 5
+#define YY_FLEX_SUBMINOR_VERSION 31
+#if YY_FLEX_SUBMINOR_VERSION > 0
+#define FLEX_BETA
+#endif
+
+/* First, we deal with platform-specific or compiler-specific issues. */
+
+/* begin standard C headers. */
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdlib.h>
+
+/* end standard C headers. */
+
+/* flex integer type definitions */
+
+#ifndef FLEXINT_H
+#define FLEXINT_H
+
+/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */
+
+#if defined __STDC_VERSION__ && __STDC_VERSION__ >= 199901L
+#include <inttypes.h>
+typedef int8_t flex_int8_t;
+typedef uint8_t flex_uint8_t;
+typedef int16_t flex_int16_t;
+typedef uint16_t flex_uint16_t;
+typedef int32_t flex_int32_t;
+typedef uint32_t flex_uint32_t;
+#else
+typedef signed char flex_int8_t;
+typedef short int flex_int16_t;
+typedef int flex_int32_t;
+typedef unsigned char flex_uint8_t;
+typedef unsigned short int flex_uint16_t;
+typedef unsigned int flex_uint32_t;
+#endif /* ! C99 */
+
+/* Limits of integral types. */
+#ifndef INT8_MIN
+#define INT8_MIN (-128)
+#endif
+#ifndef INT16_MIN
+#define INT16_MIN (-32767-1)
+#endif
+#ifndef INT32_MIN
+#define INT32_MIN (-2147483647-1)
+#endif
+#ifndef INT8_MAX
+#define INT8_MAX (127)
+#endif
+#ifndef INT16_MAX
+#define INT16_MAX (32767)
+#endif
+#ifndef INT32_MAX
+#define INT32_MAX (2147483647)
+#endif
+#ifndef UINT8_MAX
+#define UINT8_MAX (255U)
+#endif
+#ifndef UINT16_MAX
+#define UINT16_MAX (65535U)
+#endif
+#ifndef UINT32_MAX
+#define UINT32_MAX (4294967295U)
+#endif
+
+#endif /* ! FLEXINT_H */
+
+#ifdef __cplusplus
+
+/* The "const" storage-class-modifier is valid. */
+#define YY_USE_CONST
+
+#else /* ! __cplusplus */
+
+#if __STDC__
+
+#define YY_USE_CONST
+
+#endif /* __STDC__ */
+#endif /* ! __cplusplus */
+
+#ifdef YY_USE_CONST
+#define yyconst const
+#else
+#define yyconst
+#endif
+
+/* Returned upon end-of-file. */
+#define YY_NULL 0
+
+/* Promotes a possibly negative, possibly signed char to an unsigned
+ * integer for use as an array index. If the signed char is negative,
+ * we want to instead treat it as an 8-bit unsigned char, hence the
+ * double cast.
+ */
+#define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c)
+
+/* Enter a start condition. This macro really ought to take a parameter,
+ * but we do it the disgusting crufty way forced on us by the ()-less
+ * definition of BEGIN.
+ */
+#define BEGIN (yy_start) = 1 + 2 *
+
+/* Translate the current start state into a value that can be later handed
+ * to BEGIN to return to the state. The YYSTATE alias is for lex
+ * compatibility.
+ */
+#define YY_START (((yy_start) - 1) / 2)
+#define YYSTATE YY_START
+
+/* Action number for EOF rule of a given start state. */
+#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)
+
+/* Special action meaning "start processing a new file". */
+#define YY_NEW_FILE zconfrestart(zconfin )
+
+#define YY_END_OF_BUFFER_CHAR 0
+
+/* Size of default input buffer. */
+#ifndef YY_BUF_SIZE
+#define YY_BUF_SIZE 16384
+#endif
+
+#ifndef YY_TYPEDEF_YY_BUFFER_STATE
+#define YY_TYPEDEF_YY_BUFFER_STATE
+typedef struct yy_buffer_state *YY_BUFFER_STATE;
+#endif
+
+extern int zconfleng;
+
+extern FILE *zconfin, *zconfout;
+
+#define EOB_ACT_CONTINUE_SCAN 0
+#define EOB_ACT_END_OF_FILE 1
+#define EOB_ACT_LAST_MATCH 2
+
+ #define YY_LESS_LINENO(n)
+
+/* Return all but the first "n" matched characters back to the input stream. */
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up zconftext. */ \
+ int yyless_macro_arg = (n); \
+ YY_LESS_LINENO(yyless_macro_arg);\
+ *yy_cp = (yy_hold_char); \
+ YY_RESTORE_YY_MORE_OFFSET \
+ (yy_c_buf_p) = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \
+ YY_DO_BEFORE_ACTION; /* set up zconftext again */ \
+ } \
+ while ( 0 )
+
+#define unput(c) yyunput( c, (yytext_ptr) )
+
+/* The following is because we cannot portably get our hands on size_t
+ * (without autoconf's help, which isn't available because we want
+ * flex-generated scanners to compile on their own).
+ */
+
+#ifndef YY_TYPEDEF_YY_SIZE_T
+#define YY_TYPEDEF_YY_SIZE_T
+typedef unsigned int yy_size_t;
+#endif
+
+#ifndef YY_STRUCT_YY_BUFFER_STATE
+#define YY_STRUCT_YY_BUFFER_STATE
+struct yy_buffer_state
+ {
+ FILE *yy_input_file;
+
+ char *yy_ch_buf; /* input buffer */
+ char *yy_buf_pos; /* current position in input buffer */
+
+ /* Size of input buffer in bytes, not including room for EOB
+ * characters.
+ */
+ yy_size_t yy_buf_size;
+
+ /* Number of characters read into yy_ch_buf, not including EOB
+ * characters.
+ */
+ int yy_n_chars;
+
+ /* Whether we "own" the buffer - i.e., we know we created it,
+ * and can realloc() it to grow it, and should free() it to
+ * delete it.
+ */
+ int yy_is_our_buffer;
+
+ /* Whether this is an "interactive" input source; if so, and
+ * if we're using stdio for input, then we want to use getc()
+ * instead of fread(), to make sure we stop fetching input after
+ * each newline.
+ */
+ int yy_is_interactive;
+
+ /* Whether we're considered to be at the beginning of a line.
+ * If so, '^' rules will be active on the next match, otherwise
+ * not.
+ */
+ int yy_at_bol;
+
+ int yy_bs_lineno; /**< The line count. */
+ int yy_bs_column; /**< The column count. */
+
+ /* Whether to try to fill the input buffer when we reach the
+ * end of it.
+ */
+ int yy_fill_buffer;
+
+ int yy_buffer_status;
+
+#define YY_BUFFER_NEW 0
+#define YY_BUFFER_NORMAL 1
+ /* When an EOF's been seen but there's still some text to process
+ * then we mark the buffer as YY_EOF_PENDING, to indicate that we
+ * shouldn't try reading from the input source any more. We might
+ * still have a bunch of tokens to match, though, because of
+ * possible backing-up.
+ *
+ * When we actually see the EOF, we change the status to "new"
+ * (via zconfrestart()), so that the user can continue scanning by
+ * just pointing zconfin at a new input file.
+ */
+#define YY_BUFFER_EOF_PENDING 2
+
+ };
+#endif /* !YY_STRUCT_YY_BUFFER_STATE */
+
+/* Stack of input buffers. */
+static size_t yy_buffer_stack_top = 0; /**< index of top of stack. */
+static size_t yy_buffer_stack_max = 0; /**< capacity of stack. */
+static YY_BUFFER_STATE * yy_buffer_stack = 0; /**< Stack as an array. */
+
+/* We provide macros for accessing buffer states in case in the
+ * future we want to put the buffer states in a more general
+ * "scanner state".
+ *
+ * Returns the top of the stack, or NULL.
+ */
+#define YY_CURRENT_BUFFER ( (yy_buffer_stack) \
+ ? (yy_buffer_stack)[(yy_buffer_stack_top)] \
+ : NULL)
+
+/* Same as previous macro, but useful when we know that the buffer stack is not
+ * NULL or when we need an lvalue. For internal use only.
+ */
+#define YY_CURRENT_BUFFER_LVALUE (yy_buffer_stack)[(yy_buffer_stack_top)]
+
+/* yy_hold_char holds the character lost when zconftext is formed. */
+static char yy_hold_char;
+static int yy_n_chars; /* number of characters read into yy_ch_buf */
+int zconfleng;
+
+/* Points to current character in buffer. */
+static char *yy_c_buf_p = (char *) 0;
+static int yy_init = 1; /* whether we need to initialize */
+static int yy_start = 0; /* start state number */
+
+/* Flag which is used to allow zconfwrap()'s to do buffer switches
+ * instead of setting up a fresh zconfin. A bit of a hack ...
+ */
+static int yy_did_buffer_switch_on_eof;
+
+void zconfrestart (FILE *input_file );
+void zconf_switch_to_buffer (YY_BUFFER_STATE new_buffer );
+YY_BUFFER_STATE zconf_create_buffer (FILE *file,int size );
+void zconf_delete_buffer (YY_BUFFER_STATE b );
+void zconf_flush_buffer (YY_BUFFER_STATE b );
+void zconfpush_buffer_state (YY_BUFFER_STATE new_buffer );
+void zconfpop_buffer_state (void );
+
+static void zconfensure_buffer_stack (void );
+static void zconf_load_buffer_state (void );
+static void zconf_init_buffer (YY_BUFFER_STATE b,FILE *file );
+
+#define YY_FLUSH_BUFFER zconf_flush_buffer(YY_CURRENT_BUFFER )
+
+YY_BUFFER_STATE zconf_scan_buffer (char *base,yy_size_t size );
+YY_BUFFER_STATE zconf_scan_string (yyconst char *yy_str );
+YY_BUFFER_STATE zconf_scan_bytes (yyconst char *bytes,int len );
+
+void *zconfalloc (yy_size_t );
+void *zconfrealloc (void *,yy_size_t );
+void zconffree (void * );
+
+#define yy_new_buffer zconf_create_buffer
+
+#define yy_set_interactive(is_interactive) \
+ { \
+ if ( ! YY_CURRENT_BUFFER ){ \
+ zconfensure_buffer_stack (); \
+ YY_CURRENT_BUFFER_LVALUE = \
+ zconf_create_buffer(zconfin,YY_BUF_SIZE ); \
+ } \
+ YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \
+ }
+
+#define yy_set_bol(at_bol) \
+ { \
+ if ( ! YY_CURRENT_BUFFER ){\
+ zconfensure_buffer_stack (); \
+ YY_CURRENT_BUFFER_LVALUE = \
+ zconf_create_buffer(zconfin,YY_BUF_SIZE ); \
+ } \
+ YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \
+ }
+
+#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol)
+
+/* Begin user sect3 */
+
+#define zconfwrap() 1
+#define YY_SKIP_YYWRAP
+
+typedef unsigned char YY_CHAR;
+
+FILE *zconfin = (FILE *) 0, *zconfout = (FILE *) 0;
+
+typedef int yy_state_type;
+
+extern int zconflineno;
+
+int zconflineno = 1;
+
+extern char *zconftext;
+#define yytext_ptr zconftext
+static yyconst flex_int16_t yy_nxt[][17] =
+ {
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0
+ },
+
+ {
+ 11, 12, 13, 14, 12, 12, 15, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12
+ },
+
+ {
+ 11, 12, 13, 14, 12, 12, 15, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12
+ },
+
+ {
+ 11, 16, 16, 17, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 18, 16, 16, 16
+ },
+
+ {
+ 11, 16, 16, 17, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 18, 16, 16, 16
+
+ },
+
+ {
+ 11, 19, 20, 21, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19
+ },
+
+ {
+ 11, 19, 20, 21, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19
+ },
+
+ {
+ 11, 22, 22, 23, 22, 24, 22, 22, 24, 22,
+ 22, 22, 22, 22, 22, 25, 22
+ },
+
+ {
+ 11, 22, 22, 23, 22, 24, 22, 22, 24, 22,
+ 22, 22, 22, 22, 22, 25, 22
+ },
+
+ {
+ 11, 26, 26, 27, 28, 29, 30, 31, 29, 32,
+ 33, 34, 35, 35, 36, 37, 38
+
+ },
+
+ {
+ 11, 26, 26, 27, 28, 29, 30, 31, 29, 32,
+ 33, 34, 35, 35, 36, 37, 38
+ },
+
+ {
+ -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+ -11, -11, -11, -11, -11, -11, -11
+ },
+
+ {
+ 11, -12, -12, -12, -12, -12, -12, -12, -12, -12,
+ -12, -12, -12, -12, -12, -12, -12
+ },
+
+ {
+ 11, -13, 39, 40, -13, -13, 41, -13, -13, -13,
+ -13, -13, -13, -13, -13, -13, -13
+ },
+
+ {
+ 11, -14, -14, -14, -14, -14, -14, -14, -14, -14,
+ -14, -14, -14, -14, -14, -14, -14
+
+ },
+
+ {
+ 11, 42, 42, 43, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42
+ },
+
+ {
+ 11, -16, -16, -16, -16, -16, -16, -16, -16, -16,
+ -16, -16, -16, -16, -16, -16, -16
+ },
+
+ {
+ 11, -17, -17, -17, -17, -17, -17, -17, -17, -17,
+ -17, -17, -17, -17, -17, -17, -17
+ },
+
+ {
+ 11, -18, -18, -18, -18, -18, -18, -18, -18, -18,
+ -18, -18, -18, 44, -18, -18, -18
+ },
+
+ {
+ 11, 45, 45, -19, 45, 45, 45, 45, 45, 45,
+ 45, 45, 45, 45, 45, 45, 45
+
+ },
+
+ {
+ 11, -20, 46, 47, -20, -20, -20, -20, -20, -20,
+ -20, -20, -20, -20, -20, -20, -20
+ },
+
+ {
+ 11, 48, -21, -21, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48
+ },
+
+ {
+ 11, 49, 49, 50, 49, -22, 49, 49, -22, 49,
+ 49, 49, 49, 49, 49, -22, 49
+ },
+
+ {
+ 11, -23, -23, -23, -23, -23, -23, -23, -23, -23,
+ -23, -23, -23, -23, -23, -23, -23
+ },
+
+ {
+ 11, -24, -24, -24, -24, -24, -24, -24, -24, -24,
+ -24, -24, -24, -24, -24, -24, -24
+
+ },
+
+ {
+ 11, 51, 51, 52, 51, 51, 51, 51, 51, 51,
+ 51, 51, 51, 51, 51, 51, 51
+ },
+
+ {
+ 11, -26, -26, -26, -26, -26, -26, -26, -26, -26,
+ -26, -26, -26, -26, -26, -26, -26
+ },
+
+ {
+ 11, -27, -27, -27, -27, -27, -27, -27, -27, -27,
+ -27, -27, -27, -27, -27, -27, -27
+ },
+
+ {
+ 11, -28, -28, -28, -28, -28, -28, -28, -28, -28,
+ -28, -28, -28, -28, 53, -28, -28
+ },
+
+ {
+ 11, -29, -29, -29, -29, -29, -29, -29, -29, -29,
+ -29, -29, -29, -29, -29, -29, -29
+
+ },
+
+ {
+ 11, 54, 54, -30, 54, 54, 54, 54, 54, 54,
+ 54, 54, 54, 54, 54, 54, 54
+ },
+
+ {
+ 11, -31, -31, -31, -31, -31, -31, 55, -31, -31,
+ -31, -31, -31, -31, -31, -31, -31
+ },
+
+ {
+ 11, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+ -32, -32, -32, -32, -32, -32, -32
+ },
+
+ {
+ 11, -33, -33, -33, -33, -33, -33, -33, -33, -33,
+ -33, -33, -33, -33, -33, -33, -33
+ },
+
+ {
+ 11, -34, -34, -34, -34, -34, -34, -34, -34, -34,
+ -34, 56, 57, 57, -34, -34, -34
+
+ },
+
+ {
+ 11, -35, -35, -35, -35, -35, -35, -35, -35, -35,
+ -35, 57, 57, 57, -35, -35, -35
+ },
+
+ {
+ 11, -36, -36, -36, -36, -36, -36, -36, -36, -36,
+ -36, -36, -36, -36, -36, -36, -36
+ },
+
+ {
+ 11, -37, -37, 58, -37, -37, -37, -37, -37, -37,
+ -37, -37, -37, -37, -37, -37, -37
+ },
+
+ {
+ 11, -38, -38, -38, -38, -38, -38, -38, -38, -38,
+ -38, -38, -38, -38, -38, -38, 59
+ },
+
+ {
+ 11, -39, 39, 40, -39, -39, 41, -39, -39, -39,
+ -39, -39, -39, -39, -39, -39, -39
+
+ },
+
+ {
+ 11, -40, -40, -40, -40, -40, -40, -40, -40, -40,
+ -40, -40, -40, -40, -40, -40, -40
+ },
+
+ {
+ 11, 42, 42, 43, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42
+ },
+
+ {
+ 11, 42, 42, 43, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42
+ },
+
+ {
+ 11, -43, -43, -43, -43, -43, -43, -43, -43, -43,
+ -43, -43, -43, -43, -43, -43, -43
+ },
+
+ {
+ 11, -44, -44, -44, -44, -44, -44, -44, -44, -44,
+ -44, -44, -44, 44, -44, -44, -44
+
+ },
+
+ {
+ 11, 45, 45, -45, 45, 45, 45, 45, 45, 45,
+ 45, 45, 45, 45, 45, 45, 45
+ },
+
+ {
+ 11, -46, 46, 47, -46, -46, -46, -46, -46, -46,
+ -46, -46, -46, -46, -46, -46, -46
+ },
+
+ {
+ 11, 48, -47, -47, 48, 48, 48, 48, 48, 48,
+ 48, 48, 48, 48, 48, 48, 48
+ },
+
+ {
+ 11, -48, -48, -48, -48, -48, -48, -48, -48, -48,
+ -48, -48, -48, -48, -48, -48, -48
+ },
+
+ {
+ 11, 49, 49, 50, 49, -49, 49, 49, -49, 49,
+ 49, 49, 49, 49, 49, -49, 49
+
+ },
+
+ {
+ 11, -50, -50, -50, -50, -50, -50, -50, -50, -50,
+ -50, -50, -50, -50, -50, -50, -50
+ },
+
+ {
+ 11, -51, -51, 52, -51, -51, -51, -51, -51, -51,
+ -51, -51, -51, -51, -51, -51, -51
+ },
+
+ {
+ 11, -52, -52, -52, -52, -52, -52, -52, -52, -52,
+ -52, -52, -52, -52, -52, -52, -52
+ },
+
+ {
+ 11, -53, -53, -53, -53, -53, -53, -53, -53, -53,
+ -53, -53, -53, -53, -53, -53, -53
+ },
+
+ {
+ 11, 54, 54, -54, 54, 54, 54, 54, 54, 54,
+ 54, 54, 54, 54, 54, 54, 54
+
+ },
+
+ {
+ 11, -55, -55, -55, -55, -55, -55, -55, -55, -55,
+ -55, -55, -55, -55, -55, -55, -55
+ },
+
+ {
+ 11, -56, -56, -56, -56, -56, -56, -56, -56, -56,
+ -56, 60, 57, 57, -56, -56, -56
+ },
+
+ {
+ 11, -57, -57, -57, -57, -57, -57, -57, -57, -57,
+ -57, 57, 57, 57, -57, -57, -57
+ },
+
+ {
+ 11, -58, -58, -58, -58, -58, -58, -58, -58, -58,
+ -58, -58, -58, -58, -58, -58, -58
+ },
+
+ {
+ 11, -59, -59, -59, -59, -59, -59, -59, -59, -59,
+ -59, -59, -59, -59, -59, -59, -59
+
+ },
+
+ {
+ 11, -60, -60, -60, -60, -60, -60, -60, -60, -60,
+ -60, 57, 57, 57, -60, -60, -60
+ },
+
+ } ;
+
+static yy_state_type yy_get_previous_state (void );
+static yy_state_type yy_try_NUL_trans (yy_state_type current_state );
+static int yy_get_next_buffer (void );
+static void yy_fatal_error (yyconst char msg[] );
+
+/* Done after the current pattern has been matched and before the
+ * corresponding action - sets up zconftext.
+ */
+#define YY_DO_BEFORE_ACTION \
+ (yytext_ptr) = yy_bp; \
+ zconfleng = (size_t) (yy_cp - yy_bp); \
+ (yy_hold_char) = *yy_cp; \
+ *yy_cp = '\0'; \
+ (yy_c_buf_p) = yy_cp;
+
+#define YY_NUM_RULES 33
+#define YY_END_OF_BUFFER 34
+/* This struct is not used in this scanner,
+ but its presence is necessary. */
+struct yy_trans_info
+ {
+ flex_int32_t yy_verify;
+ flex_int32_t yy_nxt;
+ };
+static yyconst flex_int16_t yy_accept[61] =
+ { 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 34, 5, 4, 2, 3, 7, 8, 6, 32, 29,
+ 31, 24, 28, 27, 26, 22, 17, 13, 16, 20,
+ 22, 11, 12, 19, 19, 14, 22, 22, 4, 2,
+ 3, 3, 1, 6, 32, 29, 31, 30, 24, 23,
+ 26, 25, 15, 20, 9, 19, 19, 21, 10, 18
+ } ;
+
+static yyconst flex_int32_t yy_ec[256] =
+ { 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 3,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 4, 5, 6, 1, 1, 7, 8, 9,
+ 10, 1, 1, 1, 11, 12, 12, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 1, 1, 1,
+ 14, 1, 1, 1, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 1, 15, 1, 1, 13, 1, 13, 13, 13, 13,
+
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 1, 16, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1
+ } ;
+
+extern int zconf_flex_debug;
+int zconf_flex_debug = 0;
+
+/* The intent behind this definition is that it'll catch
+ * any uses of REJECT which flex missed.
+ */
+#define REJECT reject_used_but_not_detected
+#define yymore() yymore_used_but_not_detected
+#define YY_MORE_ADJ 0
+#define YY_RESTORE_YY_MORE_OFFSET
+char *zconftext;
+
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#define LKC_DIRECT_LINK
+#include "lkc.h"
+
+#define START_STRSIZE 16
+
+static struct {
+ struct file *file;
+ int lineno;
+} current_pos;
+
+static char *text;
+static int text_size, text_asize;
+
+struct buffer {
+ struct buffer *parent;
+ YY_BUFFER_STATE state;
+};
+
+struct buffer *current_buf;
+
+static int last_ts, first_ts;
+
+static void zconf_endhelp(void);
+static void zconf_endfile(void);
+
+void new_string(void)
+{
+ text = malloc(START_STRSIZE);
+ text_asize = START_STRSIZE;
+ text_size = 0;
+ *text = 0;
+}
+
+void append_string(const char *str, int size)
+{
+ int new_size = text_size + size + 1;
+ if (new_size > text_asize) {
+ new_size += START_STRSIZE - 1;
+ new_size &= -START_STRSIZE;
+ text = realloc(text, new_size);
+ text_asize = new_size;
+ }
+ memcpy(text + text_size, str, size);
+ text_size += size;
+ text[text_size] = 0;
+}
+
+void alloc_string(const char *str, int size)
+{
+ text = malloc(size + 1);
+ memcpy(text, str, size);
+ text[size] = 0;
+}
+
+#define INITIAL 0
+#define COMMAND 1
+#define HELP 2
+#define STRING 3
+#define PARAM 4
+
+#ifndef YY_NO_UNISTD_H
+/* Special case for "unistd.h", since it is non-ANSI. We include it way
+ * down here because we want the user's section 1 to have been scanned first.
+ * The user has a chance to override it with an option.
+ */
+#include <unistd.h>
+#endif
+
+#ifndef YY_EXTRA_TYPE
+#define YY_EXTRA_TYPE void *
+#endif
+
+/* Macros after this point can all be overridden by user definitions in
+ * section 1.
+ */
+
+#ifndef YY_SKIP_YYWRAP
+#ifdef __cplusplus
+extern "C" int zconfwrap (void );
+#else
+extern int zconfwrap (void );
+#endif
+#endif
+
+ static void yyunput (int c,char *buf_ptr );
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy (char *,yyconst char *,int );
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen (yyconst char * );
+#endif
+
+#ifndef YY_NO_INPUT
+
+#ifdef __cplusplus
+static int yyinput (void );
+#else
+static int input (void );
+#endif
+
+#endif
+
+/* Amount of stuff to slurp up with each read. */
+#ifndef YY_READ_BUF_SIZE
+#define YY_READ_BUF_SIZE 8192
+#endif
+
+/* Copy whatever the last rule matched to the standard output. */
+#ifndef ECHO
+/* This used to be an fputs(), but since the string might contain NUL's,
+ * we now use fwrite().
+ */
+#define ECHO (void) fwrite( zconftext, zconfleng, 1, zconfout )
+#endif
+
+/* Gets input and stuffs it into "buf". The number of characters read, or
+ * YY_NULL, is returned in "result".
+ */
+#ifndef YY_INPUT
+#define YY_INPUT(buf,result,max_size) \
+ errno=0; \
+ while ( (result = read( fileno(zconfin), (char *) buf, max_size )) < 0 ) \
+ { \
+ if( errno != EINTR) \
+ { \
+ YY_FATAL_ERROR( "input in flex scanner failed" ); \
+ break; \
+ } \
+ errno=0; \
+ clearerr(zconfin); \
+ }\
+\
+
+#endif
+
+/* No semi-colon after return; correct usage is to write "yyterminate();" -
+ * we don't want an extra ';' after the "return" because that will cause
+ * some compilers to complain about unreachable statements.
+ */
+#ifndef yyterminate
+#define yyterminate() return YY_NULL
+#endif
+
+/* Number of entries by which start-condition stack grows. */
+#ifndef YY_START_STACK_INCR
+#define YY_START_STACK_INCR 25
+#endif
+
+/* Report a fatal error. */
+#ifndef YY_FATAL_ERROR
+#define YY_FATAL_ERROR(msg) yy_fatal_error( msg )
+#endif
+
+/* end tables serialization structures and prototypes */
+
+/* Default declaration of generated scanner - a define so the user can
+ * easily add parameters.
+ */
+#ifndef YY_DECL
+#define YY_DECL_IS_OURS 1
+
+extern int zconflex (void);
+
+#define YY_DECL int zconflex (void)
+#endif /* !YY_DECL */
+
+/* Code executed at the beginning of each rule, after zconftext and zconfleng
+ * have been set up.
+ */
+#ifndef YY_USER_ACTION
+#define YY_USER_ACTION
+#endif
+
+/* Code executed at the end of each rule. */
+#ifndef YY_BREAK
+#define YY_BREAK break;
+#endif
+
+#define YY_RULE_SETUP \
+ YY_USER_ACTION
+
+/** The main scanner function which does all the work.
+ */
+YY_DECL
+{
+ register yy_state_type yy_current_state;
+ register char *yy_cp, *yy_bp;
+ register int yy_act;
+
+ int str = 0;
+ int ts, i;
+
+ if ( (yy_init) )
+ {
+ (yy_init) = 0;
+
+#ifdef YY_USER_INIT
+ YY_USER_INIT;
+#endif
+
+ if ( ! (yy_start) )
+ (yy_start) = 1; /* first start state */
+
+ if ( ! zconfin )
+ zconfin = stdin;
+
+ if ( ! zconfout )
+ zconfout = stdout;
+
+ if ( ! YY_CURRENT_BUFFER ) {
+ zconfensure_buffer_stack ();
+ YY_CURRENT_BUFFER_LVALUE =
+ zconf_create_buffer(zconfin,YY_BUF_SIZE );
+ }
+
+ zconf_load_buffer_state( );
+ }
+
+ while ( 1 ) /* loops until end-of-file is reached */
+ {
+ yy_cp = (yy_c_buf_p);
+
+ /* Support of zconftext. */
+ *yy_cp = (yy_hold_char);
+
+ /* yy_bp points to the position in yy_ch_buf of the start of
+ * the current run.
+ */
+ yy_bp = yy_cp;
+
+ yy_current_state = (yy_start);
+yy_match:
+ while ( (yy_current_state = yy_nxt[yy_current_state][ yy_ec[YY_SC_TO_UI(*yy_cp)] ]) > 0 )
+ ++yy_cp;
+
+ yy_current_state = -yy_current_state;
+
+yy_find_action:
+ yy_act = yy_accept[yy_current_state];
+
+ YY_DO_BEFORE_ACTION;
+
+do_action: /* This label is used only to access EOF actions. */
+
+ switch ( yy_act )
+ { /* beginning of action switch */
+case 1:
+/* rule 1 can match eol */
+case 2:
+/* rule 2 can match eol */
+YY_RULE_SETUP
+{
+ current_file->lineno++;
+ return T_EOL;
+}
+ YY_BREAK
+case 3:
+YY_RULE_SETUP
+
+ YY_BREAK
+case 4:
+YY_RULE_SETUP
+{
+ BEGIN(COMMAND);
+}
+ YY_BREAK
+case 5:
+YY_RULE_SETUP
+{
+ unput(zconftext[0]);
+ BEGIN(COMMAND);
+}
+ YY_BREAK
+
+case 6:
+YY_RULE_SETUP
+{
+ struct kconf_id *id = kconf_id_lookup(zconftext, zconfleng);
+ BEGIN(PARAM);
+ current_pos.file = current_file;
+ current_pos.lineno = current_file->lineno;
+ if (id && id->flags & TF_COMMAND) {
+ zconflval.id = id;
+ return id->token;
+ }
+ alloc_string(zconftext, zconfleng);
+ zconflval.string = text;
+ return T_WORD;
+ }
+ YY_BREAK
+case 7:
+YY_RULE_SETUP
+
+ YY_BREAK
+case 8:
+/* rule 8 can match eol */
+YY_RULE_SETUP
+{
+ BEGIN(INITIAL);
+ current_file->lineno++;
+ return T_EOL;
+ }
+ YY_BREAK
+
+case 9:
+YY_RULE_SETUP
+return T_AND;
+ YY_BREAK
+case 10:
+YY_RULE_SETUP
+return T_OR;
+ YY_BREAK
+case 11:
+YY_RULE_SETUP
+return T_OPEN_PAREN;
+ YY_BREAK
+case 12:
+YY_RULE_SETUP
+return T_CLOSE_PAREN;
+ YY_BREAK
+case 13:
+YY_RULE_SETUP
+return T_NOT;
+ YY_BREAK
+case 14:
+YY_RULE_SETUP
+return T_EQUAL;
+ YY_BREAK
+case 15:
+YY_RULE_SETUP
+return T_UNEQUAL;
+ YY_BREAK
+case 16:
+YY_RULE_SETUP
+{
+ str = zconftext[0];
+ new_string();
+ BEGIN(STRING);
+ }
+ YY_BREAK
+case 17:
+/* rule 17 can match eol */
+YY_RULE_SETUP
+BEGIN(INITIAL); current_file->lineno++; return T_EOL;
+ YY_BREAK
+case 18:
+YY_RULE_SETUP
+/* ignore */
+ YY_BREAK
+case 19:
+YY_RULE_SETUP
+{
+ struct kconf_id *id = kconf_id_lookup(zconftext, zconfleng);
+ if (id && id->flags & TF_PARAM) {
+ zconflval.id = id;
+ return id->token;
+ }
+ alloc_string(zconftext, zconfleng);
+ zconflval.string = text;
+ return T_WORD;
+ }
+ YY_BREAK
+case 20:
+YY_RULE_SETUP
+/* comment */
+ YY_BREAK
+case 21:
+/* rule 21 can match eol */
+YY_RULE_SETUP
+current_file->lineno++;
+ YY_BREAK
+case 22:
+YY_RULE_SETUP
+
+ YY_BREAK
+case YY_STATE_EOF(PARAM):
+{
+ BEGIN(INITIAL);
+ }
+ YY_BREAK
+
+case 23:
+/* rule 23 can match eol */
+*yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
+(yy_c_buf_p) = yy_cp -= 1;
+YY_DO_BEFORE_ACTION; /* set up zconftext again */
+YY_RULE_SETUP
+{
+ append_string(zconftext, zconfleng);
+ zconflval.string = text;
+ return T_WORD_QUOTE;
+ }
+ YY_BREAK
+case 24:
+YY_RULE_SETUP
+{
+ append_string(zconftext, zconfleng);
+ }
+ YY_BREAK
+case 25:
+/* rule 25 can match eol */
+*yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
+(yy_c_buf_p) = yy_cp -= 1;
+YY_DO_BEFORE_ACTION; /* set up zconftext again */
+YY_RULE_SETUP
+{
+ append_string(zconftext + 1, zconfleng - 1);
+ zconflval.string = text;
+ return T_WORD_QUOTE;
+ }
+ YY_BREAK
+case 26:
+YY_RULE_SETUP
+{
+ append_string(zconftext + 1, zconfleng - 1);
+ }
+ YY_BREAK
+case 27:
+YY_RULE_SETUP
+{
+ if (str == zconftext[0]) {
+ BEGIN(PARAM);
+ zconflval.string = text;
+ return T_WORD_QUOTE;
+ } else
+ append_string(zconftext, 1);
+ }
+ YY_BREAK
+case 28:
+/* rule 28 can match eol */
+YY_RULE_SETUP
+{
+ printf("%s:%d:warning: multi-line strings not supported\n", zconf_curname(), zconf_lineno());
+ current_file->lineno++;
+ BEGIN(INITIAL);
+ return T_EOL;
+ }
+ YY_BREAK
+case YY_STATE_EOF(STRING):
+{
+ BEGIN(INITIAL);
+ }
+ YY_BREAK
+
+case 29:
+YY_RULE_SETUP
+{
+ ts = 0;
+ for (i = 0; i < zconfleng; i++) {
+ if (zconftext[i] == '\t')
+ ts = (ts & ~7) + 8;
+ else
+ ts++;
+ }
+ last_ts = ts;
+ if (first_ts) {
+ if (ts < first_ts) {
+ zconf_endhelp();
+ return T_HELPTEXT;
+ }
+ ts -= first_ts;
+ while (ts > 8) {
+ append_string(" ", 8);
+ ts -= 8;
+ }
+ append_string(" ", ts);
+ }
+ }
+ YY_BREAK
+case 30:
+/* rule 30 can match eol */
+*yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
+(yy_c_buf_p) = yy_cp -= 1;
+YY_DO_BEFORE_ACTION; /* set up zconftext again */
+YY_RULE_SETUP
+{
+ current_file->lineno++;
+ zconf_endhelp();
+ return T_HELPTEXT;
+ }
+ YY_BREAK
+case 31:
+/* rule 31 can match eol */
+YY_RULE_SETUP
+{
+ current_file->lineno++;
+ append_string("\n", 1);
+ }
+ YY_BREAK
+case 32:
+YY_RULE_SETUP
+{
+ append_string(zconftext, zconfleng);
+ if (!first_ts)
+ first_ts = last_ts;
+ }
+ YY_BREAK
+case YY_STATE_EOF(HELP):
+{
+ zconf_endhelp();
+ return T_HELPTEXT;
+ }
+ YY_BREAK
+
+case YY_STATE_EOF(INITIAL):
+case YY_STATE_EOF(COMMAND):
+{
+ if (current_file) {
+ zconf_endfile();
+ return T_EOL;
+ }
+ fclose(zconfin);
+ yyterminate();
+}
+ YY_BREAK
+case 33:
+YY_RULE_SETUP
+YY_FATAL_ERROR( "flex scanner jammed" );
+ YY_BREAK
+
+ case YY_END_OF_BUFFER:
+ {
+ /* Amount of text matched not including the EOB char. */
+ int yy_amount_of_matched_text = (int) (yy_cp - (yytext_ptr)) - 1;
+
+ /* Undo the effects of YY_DO_BEFORE_ACTION. */
+ *yy_cp = (yy_hold_char);
+ YY_RESTORE_YY_MORE_OFFSET
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW )
+ {
+ /* We're scanning a new file or input source. It's
+ * possible that this happened because the user
+ * just pointed zconfin at a new source and called
+ * zconflex(). If so, then we have to assure
+ * consistency between YY_CURRENT_BUFFER and our
+ * globals. Here is the right place to do so, because
+ * this is the first action (other than possibly a
+ * back-up) that will match for the new input source.
+ */
+ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
+ YY_CURRENT_BUFFER_LVALUE->yy_input_file = zconfin;
+ YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL;
+ }
+
+ /* Note that here we test for yy_c_buf_p "<=" to the position
+ * of the first EOB in the buffer, since yy_c_buf_p will
+ * already have been incremented past the NUL character
+ * (since all states make transitions on EOB to the
+ * end-of-buffer state). Contrast this with the test
+ * in input().
+ */
+ if ( (yy_c_buf_p) <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
+ { /* This was really a NUL. */
+ yy_state_type yy_next_state;
+
+ (yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state( );
+
+ /* Okay, we're now positioned to make the NUL
+ * transition. We couldn't have
+ * yy_get_previous_state() go ahead and do it
+ * for us because it doesn't know how to deal
+ * with the possibility of jamming (and we don't
+ * want to build jamming into it because then it
+ * will run more slowly).
+ */
+
+ yy_next_state = yy_try_NUL_trans( yy_current_state );
+
+ yy_bp = (yytext_ptr) + YY_MORE_ADJ;
+
+ if ( yy_next_state )
+ {
+ /* Consume the NUL. */
+ yy_cp = ++(yy_c_buf_p);
+ yy_current_state = yy_next_state;
+ goto yy_match;
+ }
+
+ else
+ {
+ yy_cp = (yy_c_buf_p);
+ goto yy_find_action;
+ }
+ }
+
+ else switch ( yy_get_next_buffer( ) )
+ {
+ case EOB_ACT_END_OF_FILE:
+ {
+ (yy_did_buffer_switch_on_eof) = 0;
+
+ if ( zconfwrap( ) )
+ {
+ /* Note: because we've taken care in
+ * yy_get_next_buffer() to have set up
+ * zconftext, we can now set up
+ * yy_c_buf_p so that if some total
+ * hoser (like flex itself) wants to
+ * call the scanner after we return the
+ * YY_NULL, it'll still work - another
+ * YY_NULL will get returned.
+ */
+ (yy_c_buf_p) = (yytext_ptr) + YY_MORE_ADJ;
+
+ yy_act = YY_STATE_EOF(YY_START);
+ goto do_action;
+ }
+
+ else
+ {
+ if ( ! (yy_did_buffer_switch_on_eof) )
+ YY_NEW_FILE;
+ }
+ break;
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ (yy_c_buf_p) =
+ (yytext_ptr) + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state( );
+
+ yy_cp = (yy_c_buf_p);
+ yy_bp = (yytext_ptr) + YY_MORE_ADJ;
+ goto yy_match;
+
+ case EOB_ACT_LAST_MATCH:
+ (yy_c_buf_p) =
+ &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)];
+
+ yy_current_state = yy_get_previous_state( );
+
+ yy_cp = (yy_c_buf_p);
+ yy_bp = (yytext_ptr) + YY_MORE_ADJ;
+ goto yy_find_action;
+ }
+ break;
+ }
+
+ default:
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--no action found" );
+ } /* end of action switch */
+ } /* end of scanning one token */
+} /* end of zconflex */
+
+/* yy_get_next_buffer - try to read in a new buffer
+ *
+ * Returns a code representing an action:
+ * EOB_ACT_LAST_MATCH -
+ * EOB_ACT_CONTINUE_SCAN - continue scanning from current position
+ * EOB_ACT_END_OF_FILE - end of file
+ */
+static int yy_get_next_buffer (void)
+{
+ register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;
+ register char *source = (yytext_ptr);
+ register int number_to_move, i;
+ int ret_val;
+
+ if ( (yy_c_buf_p) > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] )
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--end of buffer missed" );
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 )
+ { /* Don't try to fill the buffer, so this is an EOF. */
+ if ( (yy_c_buf_p) - (yytext_ptr) - YY_MORE_ADJ == 1 )
+ {
+ /* We matched a single character, the EOB, so
+ * treat this as a final EOF.
+ */
+ return EOB_ACT_END_OF_FILE;
+ }
+
+ else
+ {
+ /* We matched some text prior to the EOB, first
+ * process it.
+ */
+ return EOB_ACT_LAST_MATCH;
+ }
+ }
+
+ /* Try to read more data. */
+
+ /* First move last chars to start of buffer. */
+ number_to_move = (int) ((yy_c_buf_p) - (yytext_ptr)) - 1;
+
+ for ( i = 0; i < number_to_move; ++i )
+ *(dest++) = *(source++);
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING )
+ /* don't do the read, it's not guaranteed to return an EOF,
+ * just force an EOF
+ */
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars) = 0;
+
+ else
+ {
+ size_t num_to_read =
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;
+
+ while ( num_to_read <= 0 )
+ { /* Not enough room in the buffer - grow it. */
+
+ /* just a shorter name for the current buffer */
+ YY_BUFFER_STATE b = YY_CURRENT_BUFFER;
+
+ int yy_c_buf_p_offset =
+ (int) ((yy_c_buf_p) - b->yy_ch_buf);
+
+ if ( b->yy_is_our_buffer )
+ {
+ int new_size = b->yy_buf_size * 2;
+
+ if ( new_size <= 0 )
+ b->yy_buf_size += b->yy_buf_size / 8;
+ else
+ b->yy_buf_size *= 2;
+
+ b->yy_ch_buf = (char *)
+ /* Include room for 2 EOB chars. */
+ zconfrealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2 );
+ }
+ else
+ /* Can't grow it, we don't own it. */
+ b->yy_ch_buf = 0;
+
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR(
+ "fatal error - scanner input buffer overflow" );
+
+ (yy_c_buf_p) = &b->yy_ch_buf[yy_c_buf_p_offset];
+
+ num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size -
+ number_to_move - 1;
+
+ }
+
+ if ( num_to_read > YY_READ_BUF_SIZE )
+ num_to_read = YY_READ_BUF_SIZE;
+
+ /* Read in more data. */
+ YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
+ (yy_n_chars), num_to_read );
+
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
+ }
+
+ if ( (yy_n_chars) == 0 )
+ {
+ if ( number_to_move == YY_MORE_ADJ )
+ {
+ ret_val = EOB_ACT_END_OF_FILE;
+ zconfrestart(zconfin );
+ }
+
+ else
+ {
+ ret_val = EOB_ACT_LAST_MATCH;
+ YY_CURRENT_BUFFER_LVALUE->yy_buffer_status =
+ YY_BUFFER_EOF_PENDING;
+ }
+ }
+
+ else
+ ret_val = EOB_ACT_CONTINUE_SCAN;
+
+ (yy_n_chars) += number_to_move;
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] = YY_END_OF_BUFFER_CHAR;
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] = YY_END_OF_BUFFER_CHAR;
+
+ (yytext_ptr) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0];
+
+ return ret_val;
+}
+
+/* yy_get_previous_state - get the state just before the EOB char was reached */
+
+ static yy_state_type yy_get_previous_state (void)
+{
+ register yy_state_type yy_current_state;
+ register char *yy_cp;
+
+ yy_current_state = (yy_start);
+
+ for ( yy_cp = (yytext_ptr) + YY_MORE_ADJ; yy_cp < (yy_c_buf_p); ++yy_cp )
+ {
+ yy_current_state = yy_nxt[yy_current_state][(*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1)];
+ }
+
+ return yy_current_state;
+}
+
+/* yy_try_NUL_trans - try to make a transition on the NUL character
+ *
+ * synopsis
+ * next_state = yy_try_NUL_trans( current_state );
+ */
+ static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state )
+{
+ register int yy_is_jam;
+
+ yy_current_state = yy_nxt[yy_current_state][1];
+ yy_is_jam = (yy_current_state <= 0);
+
+ return yy_is_jam ? 0 : yy_current_state;
+}
+
+ static void yyunput (int c, register char * yy_bp )
+{
+ register char *yy_cp;
+
+ yy_cp = (yy_c_buf_p);
+
+ /* undo effects of setting up zconftext */
+ *yy_cp = (yy_hold_char);
+
+ if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
+ { /* need to shift things up to make room */
+ /* +2 for EOB chars. */
+ register int number_to_move = (yy_n_chars) + 2;
+ register char *dest = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size + 2];
+ register char *source =
+ &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move];
+
+ while ( source > YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
+ *--dest = *--source;
+
+ yy_cp += (int) (dest - source);
+ yy_bp += (int) (dest - source);
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars =
+ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_buf_size;
+
+ if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
+ YY_FATAL_ERROR( "flex scanner push-back overflow" );
+ }
+
+ *--yy_cp = (char) c;
+
+ (yytext_ptr) = yy_bp;
+ (yy_hold_char) = *yy_cp;
+ (yy_c_buf_p) = yy_cp;
+}
+
+#ifndef YY_NO_INPUT
+#ifdef __cplusplus
+ static int yyinput (void)
+#else
+ static int input (void)
+#endif
+
+{
+ int c;
+
+ *(yy_c_buf_p) = (yy_hold_char);
+
+ if ( *(yy_c_buf_p) == YY_END_OF_BUFFER_CHAR )
+ {
+ /* yy_c_buf_p now points to the character we want to return.
+ * If this occurs *before* the EOB characters, then it's a
+ * valid NUL; if not, then we've hit the end of the buffer.
+ */
+ if ( (yy_c_buf_p) < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
+ /* This was really a NUL. */
+ *(yy_c_buf_p) = '\0';
+
+ else
+ { /* need more input */
+ int offset = (yy_c_buf_p) - (yytext_ptr);
+ ++(yy_c_buf_p);
+
+ switch ( yy_get_next_buffer( ) )
+ {
+ case EOB_ACT_LAST_MATCH:
+ /* This happens because yy_g_n_b()
+ * sees that we've accumulated a
+ * token and flags that we need to
+ * try matching the token before
+ * proceeding. But for input(),
+ * there's no matching to consider.
+ * So convert the EOB_ACT_LAST_MATCH
+ * to EOB_ACT_END_OF_FILE.
+ */
+
+ /* Reset buffer status. */
+ zconfrestart(zconfin );
+
+ /*FALLTHROUGH*/
+
+ case EOB_ACT_END_OF_FILE:
+ {
+ if ( zconfwrap( ) )
+ return EOF;
+
+ if ( ! (yy_did_buffer_switch_on_eof) )
+ YY_NEW_FILE;
+#ifdef __cplusplus
+ return yyinput();
+#else
+ return input();
+#endif
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ (yy_c_buf_p) = (yytext_ptr) + offset;
+ break;
+ }
+ }
+ }
+
+ c = *(unsigned char *) (yy_c_buf_p); /* cast for 8-bit char's */
+ *(yy_c_buf_p) = '\0'; /* preserve zconftext */
+ (yy_hold_char) = *++(yy_c_buf_p);
+
+ return c;
+}
+#endif /* ifndef YY_NO_INPUT */
+
+/** Immediately switch to a different input stream.
+ * @param input_file A readable stream.
+ *
+ * @note This function does not reset the start condition to @c INITIAL .
+ */
+ void zconfrestart (FILE * input_file )
+{
+
+ if ( ! YY_CURRENT_BUFFER ){
+ zconfensure_buffer_stack ();
+ YY_CURRENT_BUFFER_LVALUE =
+ zconf_create_buffer(zconfin,YY_BUF_SIZE );
+ }
+
+ zconf_init_buffer(YY_CURRENT_BUFFER,input_file );
+ zconf_load_buffer_state( );
+}
+
+/** Switch to a different input buffer.
+ * @param new_buffer The new input buffer.
+ *
+ */
+ void zconf_switch_to_buffer (YY_BUFFER_STATE new_buffer )
+{
+
+ /* TODO. We should be able to replace this entire function body
+ * with
+ * zconfpop_buffer_state();
+ * zconfpush_buffer_state(new_buffer);
+ */
+ zconfensure_buffer_stack ();
+ if ( YY_CURRENT_BUFFER == new_buffer )
+ return;
+
+ if ( YY_CURRENT_BUFFER )
+ {
+ /* Flush out information for old buffer. */
+ *(yy_c_buf_p) = (yy_hold_char);
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
+ }
+
+ YY_CURRENT_BUFFER_LVALUE = new_buffer;
+ zconf_load_buffer_state( );
+
+ /* We don't actually know whether we did this switch during
+ * EOF (zconfwrap()) processing, but the only time this flag
+ * is looked at is after zconfwrap() is called, so it's safe
+ * to go ahead and always set it.
+ */
+ (yy_did_buffer_switch_on_eof) = 1;
+}
+
+static void zconf_load_buffer_state (void)
+{
+ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
+ (yytext_ptr) = (yy_c_buf_p) = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;
+ zconfin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;
+ (yy_hold_char) = *(yy_c_buf_p);
+}
+
+/** Allocate and initialize an input buffer state.
+ * @param file A readable stream.
+ * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE.
+ *
+ * @return the allocated buffer state.
+ */
+ YY_BUFFER_STATE zconf_create_buffer (FILE * file, int size )
+{
+ YY_BUFFER_STATE b;
+
+ b = (YY_BUFFER_STATE) zconfalloc(sizeof( struct yy_buffer_state ) );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in zconf_create_buffer()" );
+
+ b->yy_buf_size = size;
+
+ /* yy_ch_buf has to be 2 characters longer than the size given because
+ * we need to put in 2 end-of-buffer characters.
+ */
+ b->yy_ch_buf = (char *) zconfalloc(b->yy_buf_size + 2 );
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR( "out of dynamic memory in zconf_create_buffer()" );
+
+ b->yy_is_our_buffer = 1;
+
+ zconf_init_buffer(b,file );
+
+ return b;
+}
+
+/** Destroy the buffer.
+ * @param b a buffer created with zconf_create_buffer()
+ *
+ */
+ void zconf_delete_buffer (YY_BUFFER_STATE b )
+{
+
+ if ( ! b )
+ return;
+
+ if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */
+ YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;
+
+ if ( b->yy_is_our_buffer )
+ zconffree((void *) b->yy_ch_buf );
+
+ zconffree((void *) b );
+}
+
+/* Initializes or reinitializes a buffer.
+ * This function is sometimes called more than once on the same buffer,
+ * such as during a zconfrestart() or at EOF.
+ */
+ static void zconf_init_buffer (YY_BUFFER_STATE b, FILE * file )
+
+{
+ int oerrno = errno;
+
+ zconf_flush_buffer(b );
+
+ b->yy_input_file = file;
+ b->yy_fill_buffer = 1;
+
+ /* If b is the current buffer, then zconf_init_buffer was _probably_
+ * called from zconfrestart() or through yy_get_next_buffer.
+ * In that case, we don't want to reset the lineno or column.
+ */
+ if (b != YY_CURRENT_BUFFER){
+ b->yy_bs_lineno = 1;
+ b->yy_bs_column = 0;
+ }
+
+ b->yy_is_interactive = 0;
+
+ errno = oerrno;
+}
+
+/** Discard all buffered characters. On the next scan, YY_INPUT will be called.
+ * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER.
+ *
+ */
+ void zconf_flush_buffer (YY_BUFFER_STATE b )
+{
+ if ( ! b )
+ return;
+
+ b->yy_n_chars = 0;
+
+ /* We always need two end-of-buffer characters. The first causes
+ * a transition to the end-of-buffer state. The second causes
+ * a jam in that state.
+ */
+ b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR;
+ b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR;
+
+ b->yy_buf_pos = &b->yy_ch_buf[0];
+
+ b->yy_at_bol = 1;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ if ( b == YY_CURRENT_BUFFER )
+ zconf_load_buffer_state( );
+}
+
+/** Pushes the new state onto the stack. The new state becomes
+ * the current state. This function will allocate the stack
+ * if necessary.
+ * @param new_buffer The new state.
+ *
+ */
+void zconfpush_buffer_state (YY_BUFFER_STATE new_buffer )
+{
+ if (new_buffer == NULL)
+ return;
+
+ zconfensure_buffer_stack();
+
+ /* This block is copied from zconf_switch_to_buffer. */
+ if ( YY_CURRENT_BUFFER )
+ {
+ /* Flush out information for old buffer. */
+ *(yy_c_buf_p) = (yy_hold_char);
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
+ }
+
+ /* Only push if top exists. Otherwise, replace top. */
+ if (YY_CURRENT_BUFFER)
+ (yy_buffer_stack_top)++;
+ YY_CURRENT_BUFFER_LVALUE = new_buffer;
+
+ /* copied from zconf_switch_to_buffer. */
+ zconf_load_buffer_state( );
+ (yy_did_buffer_switch_on_eof) = 1;
+}
+
+/** Removes and deletes the top of the stack, if present.
+ * The next element becomes the new top.
+ *
+ */
+void zconfpop_buffer_state (void)
+{
+ if (!YY_CURRENT_BUFFER)
+ return;
+
+ zconf_delete_buffer(YY_CURRENT_BUFFER );
+ YY_CURRENT_BUFFER_LVALUE = NULL;
+ if ((yy_buffer_stack_top) > 0)
+ --(yy_buffer_stack_top);
+
+ if (YY_CURRENT_BUFFER) {
+ zconf_load_buffer_state( );
+ (yy_did_buffer_switch_on_eof) = 1;
+ }
+}
+
+/* Allocates the stack if it does not exist.
+ * Guarantees space for at least one push.
+ */
+static void zconfensure_buffer_stack (void)
+{
+ int num_to_alloc;
+
+ if (!(yy_buffer_stack)) {
+
+ /* First allocation is just for 2 elements, since we don't know if this
+ * scanner will even need a stack. We use 2 instead of 1 to avoid an
+ * immediate realloc on the next call.
+ */
+ num_to_alloc = 1;
+ (yy_buffer_stack) = (struct yy_buffer_state**)zconfalloc
+ (num_to_alloc * sizeof(struct yy_buffer_state*)
+ );
+
+ memset((yy_buffer_stack), 0, num_to_alloc * sizeof(struct yy_buffer_state*));
+
+ (yy_buffer_stack_max) = num_to_alloc;
+ (yy_buffer_stack_top) = 0;
+ return;
+ }
+
+ if ((yy_buffer_stack_top) >= ((yy_buffer_stack_max)) - 1){
+
+ /* Increase the buffer to prepare for a possible push. */
+ int grow_size = 8 /* arbitrary grow size */;
+
+ num_to_alloc = (yy_buffer_stack_max) + grow_size;
+ (yy_buffer_stack) = (struct yy_buffer_state**)zconfrealloc
+ ((yy_buffer_stack),
+ num_to_alloc * sizeof(struct yy_buffer_state*)
+ );
+
+ /* zero only the new slots.*/
+ memset((yy_buffer_stack) + (yy_buffer_stack_max), 0, grow_size * sizeof(struct yy_buffer_state*));
+ (yy_buffer_stack_max) = num_to_alloc;
+ }
+}
+
+/** Setup the input buffer state to scan directly from a user-specified character buffer.
+ * @param base the character buffer
+ * @param size the size in bytes of the character buffer
+ *
+ * @return the newly allocated buffer state object.
+ */
+YY_BUFFER_STATE zconf_scan_buffer (char * base, yy_size_t size )
+{
+ YY_BUFFER_STATE b;
+
+ if ( size < 2 ||
+ base[size-2] != YY_END_OF_BUFFER_CHAR ||
+ base[size-1] != YY_END_OF_BUFFER_CHAR )
+ /* They forgot to leave room for the EOB's. */
+ return 0;
+
+ b = (YY_BUFFER_STATE) zconfalloc(sizeof( struct yy_buffer_state ) );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in zconf_scan_buffer()" );
+
+ b->yy_buf_size = size - 2; /* "- 2" to take care of EOB's */
+ b->yy_buf_pos = b->yy_ch_buf = base;
+ b->yy_is_our_buffer = 0;
+ b->yy_input_file = 0;
+ b->yy_n_chars = b->yy_buf_size;
+ b->yy_is_interactive = 0;
+ b->yy_at_bol = 1;
+ b->yy_fill_buffer = 0;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ zconf_switch_to_buffer(b );
+
+ return b;
+}
+
+/** Setup the input buffer state to scan a string. The next call to zconflex() will
+ * scan from a @e copy of @a str.
+ * @param yy_str a NUL-terminated string to scan
+ *
+ * @return the newly allocated buffer state object.
+ * @note If you want to scan bytes that may contain NUL values, then use
+ * zconf_scan_bytes() instead.
+ */
+YY_BUFFER_STATE zconf_scan_string (yyconst char * yy_str )
+{
+
+ return zconf_scan_bytes(yy_str,strlen(yy_str) );
+}
+
+/** Setup the input buffer state to scan the given bytes. The next call to zconflex() will
+ * scan from a @e copy of @a bytes.
+ * @param bytes the byte buffer to scan
+ * @param len the number of bytes in the buffer pointed to by @a bytes.
+ *
+ * @return the newly allocated buffer state object.
+ */
+YY_BUFFER_STATE zconf_scan_bytes (yyconst char * bytes, int len )
+{
+ YY_BUFFER_STATE b;
+ char *buf;
+ yy_size_t n;
+ int i;
+
+ /* Get memory for full buffer, including space for trailing EOB's. */
+ n = len + 2;
+ buf = (char *) zconfalloc(n );
+ if ( ! buf )
+ YY_FATAL_ERROR( "out of dynamic memory in zconf_scan_bytes()" );
+
+ for ( i = 0; i < len; ++i )
+ buf[i] = bytes[i];
+
+ buf[len] = buf[len+1] = YY_END_OF_BUFFER_CHAR;
+
+ b = zconf_scan_buffer(buf,n );
+ if ( ! b )
+ YY_FATAL_ERROR( "bad buffer in zconf_scan_bytes()" );
+
+ /* It's okay to grow etc. this buffer, and we should throw it
+ * away when we're done.
+ */
+ b->yy_is_our_buffer = 1;
+
+ return b;
+}
+
+#ifndef YY_EXIT_FAILURE
+#define YY_EXIT_FAILURE 2
+#endif
+
+static void yy_fatal_error (yyconst char* msg )
+{
+ (void) fprintf( stderr, "%s\n", msg );
+ exit( YY_EXIT_FAILURE );
+}
+
+/* Redefine yyless() so it works in section 3 code. */
+
+#undef yyless
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up zconftext. */ \
+ int yyless_macro_arg = (n); \
+ YY_LESS_LINENO(yyless_macro_arg);\
+ zconftext[zconfleng] = (yy_hold_char); \
+ (yy_c_buf_p) = zconftext + yyless_macro_arg; \
+ (yy_hold_char) = *(yy_c_buf_p); \
+ *(yy_c_buf_p) = '\0'; \
+ zconfleng = yyless_macro_arg; \
+ } \
+ while ( 0 )
+
+/* Accessor methods (get/set functions) to struct members. */
+
+/** Get the current line number.
+ *
+ */
+int zconfget_lineno (void)
+{
+
+ return zconflineno;
+}
+
+/** Get the input stream.
+ *
+ */
+FILE *zconfget_in (void)
+{
+ return zconfin;
+}
+
+/** Get the output stream.
+ *
+ */
+FILE *zconfget_out (void)
+{
+ return zconfout;
+}
+
+/** Get the length of the current token.
+ *
+ */
+int zconfget_leng (void)
+{
+ return zconfleng;
+}
+
+/** Get the current token.
+ *
+ */
+
+char *zconfget_text (void)
+{
+ return zconftext;
+}
+
+/** Set the current line number.
+ * @param line_number
+ *
+ */
+void zconfset_lineno (int line_number )
+{
+
+ zconflineno = line_number;
+}
+
+/** Set the input stream. This does not discard the current
+ * input buffer.
+ * @param in_str A readable stream.
+ *
+ * @see zconf_switch_to_buffer
+ */
+void zconfset_in (FILE * in_str )
+{
+ zconfin = in_str ;
+}
+
+void zconfset_out (FILE * out_str )
+{
+ zconfout = out_str ;
+}
+
+int zconfget_debug (void)
+{
+ return zconf_flex_debug;
+}
+
+void zconfset_debug (int bdebug )
+{
+ zconf_flex_debug = bdebug ;
+}
+
+/* zconflex_destroy is for both reentrant and non-reentrant scanners. */
+int zconflex_destroy (void)
+{
+
+ /* Pop the buffer stack, destroying each element. */
+ while(YY_CURRENT_BUFFER){
+ zconf_delete_buffer(YY_CURRENT_BUFFER );
+ YY_CURRENT_BUFFER_LVALUE = NULL;
+ zconfpop_buffer_state();
+ }
+
+ /* Destroy the stack itself. */
+ zconffree((yy_buffer_stack) );
+ (yy_buffer_stack) = NULL;
+
+ return 0;
+}
+
+/*
+ * Internal utility routines.
+ */
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy (char* s1, yyconst char * s2, int n )
+{
+ register int i;
+ for ( i = 0; i < n; ++i )
+ s1[i] = s2[i];
+}
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen (yyconst char * s )
+{
+ register int n;
+ for ( n = 0; s[n]; ++n )
+ ;
+
+ return n;
+}
+#endif
+
+void *zconfalloc (yy_size_t size )
+{
+ return (void *) malloc( size );
+}
+
+void *zconfrealloc (void * ptr, yy_size_t size )
+{
+ /* The cast to (char *) in the following accommodates both
+ * implementations that use char* generic pointers, and those
+ * that use void* generic pointers. It works with the latter
+ * because both ANSI C and C++ allow castless assignment from
+ * any pointer type to void*, and deal with argument conversions
+ * as though doing an assignment.
+ */
+ return (void *) realloc( (char *) ptr, size );
+}
+
+void zconffree (void * ptr )
+{
+ free( (char *) ptr ); /* see zconfrealloc() for (char *) cast */
+}
+
+#define YYTABLES_NAME "yytables"
+
+#undef YY_NEW_FILE
+#undef YY_FLUSH_BUFFER
+#undef yy_set_bol
+#undef yy_new_buffer
+#undef yy_set_interactive
+#undef yytext_ptr
+#undef YY_DO_BEFORE_ACTION
+
+#ifdef YY_DECL_IS_OURS
+#undef YY_DECL_IS_OURS
+#undef YY_DECL
+#endif
+
+void zconf_starthelp(void)
+{
+ new_string();
+ last_ts = first_ts = 0;
+ BEGIN(HELP);
+}
+
+static void zconf_endhelp(void)
+{
+ zconflval.string = text;
+ BEGIN(INITIAL);
+}
+
+/*
+ * Try to open specified file with following names:
+ * ./name
+ * $(srctree)/name
+ * The latter is used when srctree is separate from objtree
+ * when compiling the kernel.
+ * Return NULL if file is not found.
+ */
+FILE *zconf_fopen(const char *name)
+{
+ char *env, fullname[PATH_MAX+1];
+ FILE *f;
+
+ f = fopen(name, "r");
+ if (!f && name[0] != '/') {
+ env = getenv(SRCTREE);
+ if (env) {
+ sprintf(fullname, "%s/%s", env, name);
+ f = fopen(fullname, "r");
+ }
+ }
+ return f;
+}
+
+void zconf_initscan(const char *name)
+{
+ zconfin = zconf_fopen(name);
+ if (!zconfin) {
+ printf("can't find file %s\n", name);
+ exit(1);
+ }
+
+ current_buf = malloc(sizeof(*current_buf));
+ memset(current_buf, 0, sizeof(*current_buf));
+
+ current_file = file_lookup(name);
+ current_file->lineno = 1;
+ current_file->flags = FILE_BUSY;
+}
+
+void zconf_nextfile(const char *name)
+{
+ struct file *file = file_lookup(name);
+ struct buffer *buf = malloc(sizeof(*buf));
+ memset(buf, 0, sizeof(*buf));
+
+ current_buf->state = YY_CURRENT_BUFFER;
+ zconfin = zconf_fopen(name);
+ if (!zconfin) {
+ printf("%s:%d: can't open file \"%s\"\n", zconf_curname(), zconf_lineno(), name);
+ exit(1);
+ }
+ zconf_switch_to_buffer(zconf_create_buffer(zconfin,YY_BUF_SIZE));
+ buf->parent = current_buf;
+ current_buf = buf;
+
+ if (file->flags & FILE_BUSY) {
+ printf("recursive scan (%s)?\n", name);
+ exit(1);
+ }
+ if (file->flags & FILE_SCANNED) {
+ printf("file %s already scanned?\n", name);
+ exit(1);
+ }
+ file->flags |= FILE_BUSY;
+ file->lineno = 1;
+ file->parent = current_file;
+ current_file = file;
+}
+
+static void zconf_endfile(void)
+{
+ struct buffer *parent;
+
+ current_file->flags |= FILE_SCANNED;
+ current_file->flags &= ~FILE_BUSY;
+ current_file = current_file->parent;
+
+ parent = current_buf->parent;
+ if (parent) {
+ fclose(zconfin);
+ zconf_delete_buffer(YY_CURRENT_BUFFER);
+ zconf_switch_to_buffer(parent->state);
+ }
+ free(current_buf);
+ current_buf = parent;
+}
+
+int zconf_lineno(void)
+{
+ return current_pos.lineno;
+}
+
+char *zconf_curname(void)
+{
+ return current_pos.file ? current_pos.file->name : "<none>";
+}
+
--- /dev/null
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#ifndef LKC_H
+#define LKC_H
+
+#include "expr.h"
+
+#ifndef KBUILD_NO_NLS
+# include <libintl.h>
+#else
+# define gettext(Msgid) ((const char *) (Msgid))
+# define textdomain(Domainname) ((const char *) (Domainname))
+# define bindtextdomain(Domainname, Dirname) ((const char *) (Dirname))
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef LKC_DIRECT_LINK
+#define P(name,type,arg) extern type name arg
+#else
+#include "lkc_defs.h"
+#define P(name,type,arg) extern type (*name ## _p) arg
+#endif
+#include "lkc_proto.h"
+#undef P
+
+#define SRCTREE "srctree"
+
+#define PACKAGE "linux"
+#define LOCALEDIR "/usr/share/locale"
+
+#define _(text) gettext(text)
+#define N_(text) (text)
+
+
+#define TF_COMMAND 0x0001
+#define TF_PARAM 0x0002
+
+struct kconf_id {
+ int name;
+ int token;
+ unsigned int flags;
+ enum symbol_type stype;
+};
+
+int zconfparse(void);
+void zconfdump(FILE *out);
+
+extern int zconfdebug;
+void zconf_starthelp(void);
+FILE *zconf_fopen(const char *name);
+void zconf_initscan(const char *name);
+void zconf_nextfile(const char *name);
+int zconf_lineno(void);
+char *zconf_curname(void);
+
+/* confdata.c */
+extern const char conf_def_filename[];
+
+char *conf_get_default_confname(void);
+
+/* kconfig_load.c */
+void kconfig_load(void);
+
+/* menu.c */
+void menu_init(void);
+struct menu *menu_add_menu(void);
+void menu_end_menu(void);
+void menu_add_entry(struct symbol *sym);
+void menu_end_entry(void);
+void menu_add_dep(struct expr *dep);
+struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *expr, struct expr *dep);
+struct property *menu_add_prompt(enum prop_type type, char *prompt, struct expr *dep);
+void menu_add_expr(enum prop_type type, struct expr *expr, struct expr *dep);
+void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep);
+void menu_finalize(struct menu *parent);
+void menu_set_type(int type);
+
+/* util.c */
+struct file *file_lookup(const char *name);
+int file_write_dep(const char *name);
+
+struct gstr {
+ size_t len;
+ char *s;
+};
+struct gstr str_new(void);
+struct gstr str_assign(const char *s);
+void str_free(struct gstr *gs);
+void str_append(struct gstr *gs, const char *s);
+void str_printf(struct gstr *gs, const char *fmt, ...);
+const char *str_get(struct gstr *gs);
+
+/* symbol.c */
+void sym_init(void);
+void sym_clear_all_valid(void);
+void sym_set_changed(struct symbol *sym);
+struct symbol *sym_check_deps(struct symbol *sym);
+struct property *prop_alloc(enum prop_type type, struct symbol *sym);
+struct symbol *prop_get_symbol(struct property *prop);
+
+static inline tristate sym_get_tristate_value(struct symbol *sym)
+{
+ return sym->curr.tri;
+}
+
+
+static inline struct symbol *sym_get_choice_value(struct symbol *sym)
+{
+ return (struct symbol *)sym->curr.val;
+}
+
+static inline bool sym_set_choice_value(struct symbol *ch, struct symbol *chval)
+{
+ return sym_set_tristate_value(chval, yes);
+}
+
+static inline bool sym_is_choice(struct symbol *sym)
+{
+ return sym->flags & SYMBOL_CHOICE ? true : false;
+}
+
+static inline bool sym_is_choice_value(struct symbol *sym)
+{
+ return sym->flags & SYMBOL_CHOICEVAL ? true : false;
+}
+
+static inline bool sym_is_optional(struct symbol *sym)
+{
+ return sym->flags & SYMBOL_OPTIONAL ? true : false;
+}
+
+static inline bool sym_has_value(struct symbol *sym)
+{
+ return sym->flags & SYMBOL_NEW ? false : true;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* LKC_H */
--- /dev/null
+
+/* confdata.c */
+P(conf_parse,void,(const char *name));
+P(conf_read,int,(const char *name));
+P(conf_read_simple,int,(const char *name));
+P(conf_write,int,(const char *name));
+
+/* menu.c */
+P(rootmenu,struct menu,);
+
+P(menu_is_visible,bool,(struct menu *menu));
+P(menu_get_prompt,const char *,(struct menu *menu));
+P(menu_get_root_menu,struct menu *,(struct menu *menu));
+P(menu_get_parent_menu,struct menu *,(struct menu *menu));
+
+/* symbol.c */
+P(symbol_hash,struct symbol *,[SYMBOL_HASHSIZE]);
+P(sym_change_count,int,);
+
+P(sym_lookup,struct symbol *,(const char *name, int isconst));
+P(sym_find,struct symbol *,(const char *name));
+P(sym_re_search,struct symbol **,(const char *pattern));
+P(sym_type_name,const char *,(enum symbol_type type));
+P(sym_calc_value,void,(struct symbol *sym));
+P(sym_get_type,enum symbol_type,(struct symbol *sym));
+P(sym_tristate_within_range,bool,(struct symbol *sym,tristate tri));
+P(sym_set_tristate_value,bool,(struct symbol *sym,tristate tri));
+P(sym_toggle_tristate_value,tristate,(struct symbol *sym));
+P(sym_string_valid,bool,(struct symbol *sym, const char *newval));
+P(sym_string_within_range,bool,(struct symbol *sym, const char *str));
+P(sym_set_string_value,bool,(struct symbol *sym, const char *newval));
+P(sym_is_changable,bool,(struct symbol *sym));
+P(sym_get_choice_prop,struct property *,(struct symbol *sym));
+P(sym_get_default_prop,struct property *,(struct symbol *sym));
+P(sym_get_string_value,const char *,(struct symbol *sym));
+
+P(prop_get_type_name,const char *,(enum prop_type type));
+
+/* expr.c */
+P(expr_compare_type,int,(enum expr_type t1, enum expr_type t2));
+P(expr_print,void,(struct expr *e, void (*fn)(void *, const char *), void *data, int prevtoken));
--- /dev/null
+This is NOT the official version of dialog. This version has been
+significantly modified from the original. It is for use by the Linux
+kernel configuration script. Please do not bother Savio Lam with
+questions about this program.
--- /dev/null
+# Makefile to build lxdialog package
+#
+
+check-lxdialog := $(srctree)/$(src)/check-lxdialog.sh
+
+# Use recursively expanded variables so we do not call gcc unless
+# we really need to do so. (Do not call gcc as part of make mrproper)
+HOST_EXTRACFLAGS = $(shell $(CONFIG_SHELL) $(check-lxdialog) -ccflags)
+HOST_LOADLIBES = $(shell $(CONFIG_SHELL) $(check-lxdialog) -ldflags $(HOSTCC))
+
+HOST_EXTRACFLAGS += -DLOCALE
+
+PHONY += dochecklxdialog
+$(obj)/dochecklxdialog:
+ $(Q)$(CONFIG_SHELL) $(check-lxdialog) -check $(HOSTCC) $(HOST_LOADLIBES)
+
+hostprogs-y := lxdialog
+always := $(hostprogs-y) dochecklxdialog
+
+lxdialog-objs := checklist.o menubox.o textbox.o yesno.o inputbox.o \
+ util.o lxdialog.o msgbox.o
--- /dev/null
+#!/bin/sh
+# Check ncurses compatibility
+
+# What library to link
+ldflags()
+{
+ $cc -print-file-name=libncursesw.so | grep -q /
+ if [ $? -eq 0 ]; then
+ echo '-lncursesw'
+ exit
+ fi
+ $cc -print-file-name=libncurses.so | grep -q /
+ if [ $? -eq 0 ]; then
+ echo '-lncurses'
+ exit
+ fi
+ $cc -print-file-name=libcurses.so | grep -q /
+ if [ $? -eq 0 ]; then
+ echo '-lcurses'
+ exit
+ fi
+ exit 1
+}
+
+# Where is ncurses.h?
+ccflags()
+{
+ if [ -f /usr/include/ncurses/ncurses.h ]; then
+ echo '-I/usr/include/ncurses -DCURSES_LOC="<ncurses.h>"'
+ elif [ -f /usr/include/ncurses/curses.h ]; then
+ echo '-I/usr/include/ncurses -DCURSES_LOC="<ncurses/curses.h>"'
+ elif [ -f /usr/include/ncurses.h ]; then
+ echo '-DCURSES_LOC="<ncurses.h>"'
+ else
+ echo '-DCURSES_LOC="<curses.h>"'
+ fi
+}
+
+# Temp file, try to clean up after us
+tmp=.lxdialog.tmp
+trap "rm -f $tmp" 0 1 2 3 15
+
+# Check if we can link to ncurses
+check() {
+ echo "main() {}" | $cc -xc - -o $tmp 2> /dev/null
+ if [ $? != 0 ]; then
+ echo " *** Unable to find the ncurses libraries." 1>&2
+ echo " *** make menuconfig require the ncurses libraries" 1>&2
+ echo " *** " 1>&2
+ echo " *** Install ncurses (ncurses-devel) and try again" 1>&2
+ echo " *** " 1>&2
+ exit 1
+ fi
+}
+
+usage() {
+ printf "Usage: $0 [-check compiler options|-header|-library]\n"
+}
+
+if [ $# -eq 0 ]; then
+ usage
+ exit 1
+fi
+
+cc=""
+case "$1" in
+ "-check")
+ shift
+ cc="$@"
+ check
+ ;;
+ "-ccflags")
+ ccflags
+ ;;
+ "-ldflags")
+ shift
+ cc="$@"
+ ldflags
+ ;;
+ "*")
+ usage
+ exit 1
+ ;;
+esac
--- /dev/null
+/*
+ * checklist.c -- implements the checklist box
+ *
+ * ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk)
+ * Stuart Herbert - S.Herbert@sheffield.ac.uk: radiolist extension
+ * Alessandro Rubini - rubini@ipvvis.unipv.it: merged the two
+ * MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcap@cfw.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "dialog.h"
+
+static int list_width, check_x, item_x;
+
+/*
+ * Print list item
+ */
+static void print_item(WINDOW * win, const char *item, int status, int choice,
+ int selected)
+{
+ int i;
+
+ /* Clear 'residue' of last item */
+ wattrset(win, menubox_attr);
+ wmove(win, choice, 0);
+ for (i = 0; i < list_width; i++)
+ waddch(win, ' ');
+
+ wmove(win, choice, check_x);
+ wattrset(win, selected ? check_selected_attr : check_attr);
+ wprintw(win, "(%c)", status ? 'X' : ' ');
+
+ wattrset(win, selected ? tag_selected_attr : tag_attr);
+ mvwaddch(win, choice, item_x, item[0]);
+ wattrset(win, selected ? item_selected_attr : item_attr);
+ waddstr(win, (char *)item + 1);
+ if (selected) {
+ wmove(win, choice, check_x + 1);
+ wrefresh(win);
+ }
+}
+
+/*
+ * Print the scroll indicators.
+ */
+static void print_arrows(WINDOW * win, int choice, int item_no, int scroll,
+ int y, int x, int height)
+{
+ wmove(win, y, x);
+
+ if (scroll > 0) {
+ wattrset(win, uarrow_attr);
+ waddch(win, ACS_UARROW);
+ waddstr(win, "(-)");
+ } else {
+ wattrset(win, menubox_attr);
+ waddch(win, ACS_HLINE);
+ waddch(win, ACS_HLINE);
+ waddch(win, ACS_HLINE);
+ waddch(win, ACS_HLINE);
+ }
+
+ y = y + height + 1;
+ wmove(win, y, x);
+
+ if ((height < item_no) && (scroll + choice < item_no - 1)) {
+ wattrset(win, darrow_attr);
+ waddch(win, ACS_DARROW);
+ waddstr(win, "(+)");
+ } else {
+ wattrset(win, menubox_border_attr);
+ waddch(win, ACS_HLINE);
+ waddch(win, ACS_HLINE);
+ waddch(win, ACS_HLINE);
+ waddch(win, ACS_HLINE);
+ }
+}
+
+/*
+ * Display the termination buttons
+ */
+static void print_buttons(WINDOW * dialog, int height, int width, int selected)
+{
+ int x = width / 2 - 11;
+ int y = height - 2;
+
+ print_button(dialog, "Select", y, x, selected == 0);
+ print_button(dialog, " Help ", y, x + 14, selected == 1);
+
+ wmove(dialog, y, x + 1 + 14 * selected);
+ wrefresh(dialog);
+}
+
+/*
+ * Display a dialog box with a list of options that can be turned on or off,
+ * radiolist style (only one option may be turned on at a time).
+ */
+int dialog_checklist(const char *title, const char *prompt, int height,
+ int width, int list_height, int item_no,
+ const char *const *items)
+{
+ int i, x, y, box_x, box_y;
+ int key = 0, button = 0, choice = 0, scroll = 0, max_choice, *status;
+ WINDOW *dialog, *list;
+
+ /* Allocate space for storing item on/off status */
+ if ((status = malloc(sizeof(int) * item_no)) == NULL) {
+ endwin();
+ fprintf(stderr,
+ "\nCan't allocate memory in dialog_checklist().\n");
+ exit(-1);
+ }
+
+ /* Initialize status */
+ for (i = 0; i < item_no; i++) {
+ status[i] = !strcasecmp(items[i * 3 + 2], "on");
+ if ((!choice && status[i])
+ || !strcasecmp(items[i * 3 + 2], "selected"))
+ choice = i + 1;
+ }
+ if (choice)
+ choice--;
+
+ max_choice = MIN(list_height, item_no);
+
+ /* center dialog box on screen */
+ x = (COLS - width) / 2;
+ y = (LINES - height) / 2;
+
+ draw_shadow(stdscr, y, x, height, width);
+
+ dialog = newwin(height, width, y, x);
+ keypad(dialog, TRUE);
+
+ draw_box(dialog, 0, 0, height, width, dialog_attr, border_attr);
+ wattrset(dialog, border_attr);
+ mvwaddch(dialog, height - 3, 0, ACS_LTEE);
+ for (i = 0; i < width - 2; i++)
+ waddch(dialog, ACS_HLINE);
+ wattrset(dialog, dialog_attr);
+ waddch(dialog, ACS_RTEE);
+
+ print_title(dialog, title, width);
+
+ wattrset(dialog, dialog_attr);
+ print_autowrap(dialog, prompt, width - 2, 1, 3);
+
+ list_width = width - 6;
+ box_y = height - list_height - 5;
+ box_x = (width - list_width) / 2 - 1;
+
+ /* create new window for the list */
+ list = subwin(dialog, list_height, list_width, y + box_y + 1,
+ x + box_x + 1);
+
+ keypad(list, TRUE);
+
+ /* draw a box around the list items */
+ draw_box(dialog, box_y, box_x, list_height + 2, list_width + 2,
+ menubox_border_attr, menubox_attr);
+
+ /* Find length of longest item in order to center checklist */
+ check_x = 0;
+ for (i = 0; i < item_no; i++)
+ check_x = MAX(check_x, strlen(items[i * 3 + 1]) + 4);
+
+ check_x = (list_width - check_x) / 2;
+ item_x = check_x + 4;
+
+ if (choice >= list_height) {
+ scroll = choice - list_height + 1;
+ choice -= scroll;
+ }
+
+ /* Print the list */
+ for (i = 0; i < max_choice; i++) {
+ print_item(list, items[(scroll + i) * 3 + 1],
+ status[i + scroll], i, i == choice);
+ }
+
+ print_arrows(dialog, choice, item_no, scroll,
+ box_y, box_x + check_x + 5, list_height);
+
+ print_buttons(dialog, height, width, 0);
+
+ wnoutrefresh(dialog);
+ wnoutrefresh(list);
+ doupdate();
+
+ while (key != ESC) {
+ key = wgetch(dialog);
+
+ for (i = 0; i < max_choice; i++)
+ if (toupper(key) ==
+ toupper(items[(scroll + i) * 3 + 1][0]))
+ break;
+
+ if (i < max_choice || key == KEY_UP || key == KEY_DOWN ||
+ key == '+' || key == '-') {
+ if (key == KEY_UP || key == '-') {
+ if (!choice) {
+ if (!scroll)
+ continue;
+ /* Scroll list down */
+ if (list_height > 1) {
+ /* De-highlight current first item */
+ print_item(list, items[scroll * 3 + 1],
+ status[scroll], 0, FALSE);
+ scrollok(list, TRUE);
+ wscrl(list, -1);
+ scrollok(list, FALSE);
+ }
+ scroll--;
+ print_item(list, items[scroll * 3 + 1], status[scroll], 0, TRUE);
+ print_arrows(dialog, choice, item_no,
+ scroll, box_y, box_x + check_x + 5, list_height);
+
+ wnoutrefresh(dialog);
+ wrefresh(list);
+
+ continue; /* wait for another key press */
+ } else
+ i = choice - 1;
+ } else if (key == KEY_DOWN || key == '+') {
+ if (choice == max_choice - 1) {
+ if (scroll + choice >= item_no - 1)
+ continue;
+ /* Scroll list up */
+ if (list_height > 1) {
+ /* De-highlight current last item before scrolling up */
+ print_item(list, items[(scroll + max_choice - 1) * 3 + 1],
+ status[scroll + max_choice - 1],
+ max_choice - 1, FALSE);
+ scrollok(list, TRUE);
+ wscrl(list, 1);
+ scrollok(list, FALSE);
+ }
+ scroll++;
+ print_item(list, items[(scroll + max_choice - 1) * 3 + 1],
+ status[scroll + max_choice - 1], max_choice - 1, TRUE);
+
+ print_arrows(dialog, choice, item_no,
+ scroll, box_y, box_x + check_x + 5, list_height);
+
+ wnoutrefresh(dialog);
+ wrefresh(list);
+
+ continue; /* wait for another key press */
+ } else
+ i = choice + 1;
+ }
+ if (i != choice) {
+ /* De-highlight current item */
+ print_item(list, items[(scroll + choice) * 3 + 1],
+ status[scroll + choice], choice, FALSE);
+ /* Highlight new item */
+ choice = i;
+ print_item(list, items[(scroll + choice) * 3 + 1],
+ status[scroll + choice], choice, TRUE);
+ wnoutrefresh(dialog);
+ wrefresh(list);
+ }
+ continue; /* wait for another key press */
+ }
+ switch (key) {
+ case 'H':
+ case 'h':
+ case '?':
+ fprintf(stderr, "%s", items[(scroll + choice) * 3]);
+ delwin(dialog);
+ free(status);
+ return 1;
+ case TAB:
+ case KEY_LEFT:
+ case KEY_RIGHT:
+ button = ((key == KEY_LEFT ? --button : ++button) < 0)
+ ? 1 : (button > 1 ? 0 : button);
+
+ print_buttons(dialog, height, width, button);
+ wrefresh(dialog);
+ break;
+ case 'S':
+ case 's':
+ case ' ':
+ case '\n':
+ if (!button) {
+ if (!status[scroll + choice]) {
+ for (i = 0; i < item_no; i++)
+ status[i] = 0;
+ status[scroll + choice] = 1;
+ for (i = 0; i < max_choice; i++)
+ print_item(list, items[(scroll + i) * 3 + 1],
+ status[scroll + i], i, i == choice);
+ }
+ wnoutrefresh(dialog);
+ wrefresh(list);
+
+ for (i = 0; i < item_no; i++)
+ if (status[i])
+ fprintf(stderr, "%s", items[i * 3]);
+ } else
+ fprintf(stderr, "%s", items[(scroll + choice) * 3]);
+ delwin(dialog);
+ free(status);
+ return button;
+ case 'X':
+ case 'x':
+ key = ESC;
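+ /* fall through */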
+ case ESC:
+ break;
+ }
+
+ /* Now, update everything... */
+ doupdate();
+ }
+
+ delwin(dialog);
+ free(status);
+ return -1; /* ESC pressed */
+}
--- /dev/null
+/*
+ * colors.h -- color attribute definitions
+ *
+ * AUTHOR: Savio Lam (lam836@cs.cuhk.hk)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Default color definitions
+ *
+ * *_FG = foreground
+ * *_BG = background
+ * *_HL = highlight?
+ */
+#define SCREEN_FG COLOR_CYAN
+#define SCREEN_BG COLOR_BLUE
+#define SCREEN_HL TRUE
+
+#define SHADOW_FG COLOR_BLACK
+#define SHADOW_BG COLOR_BLACK
+#define SHADOW_HL TRUE
+
+#define DIALOG_FG COLOR_BLACK
+#define DIALOG_BG COLOR_WHITE
+#define DIALOG_HL FALSE
+
+#define TITLE_FG COLOR_YELLOW
+#define TITLE_BG COLOR_WHITE
+#define TITLE_HL TRUE
+
+#define BORDER_FG COLOR_WHITE
+#define BORDER_BG COLOR_WHITE
+#define BORDER_HL TRUE
+
+#define BUTTON_ACTIVE_FG COLOR_WHITE
+#define BUTTON_ACTIVE_BG COLOR_BLUE
+#define BUTTON_ACTIVE_HL TRUE
+
+#define BUTTON_INACTIVE_FG COLOR_BLACK
+#define BUTTON_INACTIVE_BG COLOR_WHITE
+#define BUTTON_INACTIVE_HL FALSE
+
+#define BUTTON_KEY_ACTIVE_FG COLOR_WHITE
+#define BUTTON_KEY_ACTIVE_BG COLOR_BLUE
+#define BUTTON_KEY_ACTIVE_HL TRUE
+
+#define BUTTON_KEY_INACTIVE_FG COLOR_RED
+#define BUTTON_KEY_INACTIVE_BG COLOR_WHITE
+#define BUTTON_KEY_INACTIVE_HL FALSE
+
+#define BUTTON_LABEL_ACTIVE_FG COLOR_YELLOW
+#define BUTTON_LABEL_ACTIVE_BG COLOR_BLUE
+#define BUTTON_LABEL_ACTIVE_HL TRUE
+
+#define BUTTON_LABEL_INACTIVE_FG COLOR_BLACK
+#define BUTTON_LABEL_INACTIVE_BG COLOR_WHITE
+#define BUTTON_LABEL_INACTIVE_HL TRUE
+
+#define INPUTBOX_FG COLOR_BLACK
+#define INPUTBOX_BG COLOR_WHITE
+#define INPUTBOX_HL FALSE
+
+#define INPUTBOX_BORDER_FG COLOR_BLACK
+#define INPUTBOX_BORDER_BG COLOR_WHITE
+#define INPUTBOX_BORDER_HL FALSE
+
+#define SEARCHBOX_FG COLOR_BLACK
+#define SEARCHBOX_BG COLOR_WHITE
+#define SEARCHBOX_HL FALSE
+
+#define SEARCHBOX_TITLE_FG COLOR_YELLOW
+#define SEARCHBOX_TITLE_BG COLOR_WHITE
+#define SEARCHBOX_TITLE_HL TRUE
+
+#define SEARCHBOX_BORDER_FG COLOR_WHITE
+#define SEARCHBOX_BORDER_BG COLOR_WHITE
+#define SEARCHBOX_BORDER_HL TRUE
+
+#define POSITION_INDICATOR_FG COLOR_YELLOW
+#define POSITION_INDICATOR_BG COLOR_WHITE
+#define POSITION_INDICATOR_HL TRUE
+
+#define MENUBOX_FG COLOR_BLACK
+#define MENUBOX_BG COLOR_WHITE
+#define MENUBOX_HL FALSE
+
+#define MENUBOX_BORDER_FG COLOR_WHITE
+#define MENUBOX_BORDER_BG COLOR_WHITE
+#define MENUBOX_BORDER_HL TRUE
+
+#define ITEM_FG COLOR_BLACK
+#define ITEM_BG COLOR_WHITE
+#define ITEM_HL FALSE
+
+#define ITEM_SELECTED_FG COLOR_WHITE
+#define ITEM_SELECTED_BG COLOR_BLUE
+#define ITEM_SELECTED_HL TRUE
+
+#define TAG_FG COLOR_YELLOW
+#define TAG_BG COLOR_WHITE
+#define TAG_HL TRUE
+
+#define TAG_SELECTED_FG COLOR_YELLOW
+#define TAG_SELECTED_BG COLOR_BLUE
+#define TAG_SELECTED_HL TRUE
+
+#define TAG_KEY_FG COLOR_YELLOW
+#define TAG_KEY_BG COLOR_WHITE
+#define TAG_KEY_HL TRUE
+
+#define TAG_KEY_SELECTED_FG COLOR_YELLOW
+#define TAG_KEY_SELECTED_BG COLOR_BLUE
+#define TAG_KEY_SELECTED_HL TRUE
+
+#define CHECK_FG COLOR_BLACK
+#define CHECK_BG COLOR_WHITE
+#define CHECK_HL FALSE
+
+#define CHECK_SELECTED_FG COLOR_WHITE
+#define CHECK_SELECTED_BG COLOR_BLUE
+#define CHECK_SELECTED_HL TRUE
+
+#define UARROW_FG COLOR_GREEN
+#define UARROW_BG COLOR_WHITE
+#define UARROW_HL TRUE
+
+#define DARROW_FG COLOR_GREEN
+#define DARROW_BG COLOR_WHITE
+#define DARROW_HL TRUE
+
+/* End of default color definitions */
+
+#define C_ATTR(x,y) ((x ? A_BOLD : 0) | COLOR_PAIR((y)))
+#define COLOR_NAME_LEN 10
+#define COLOR_COUNT 8
+
+/*
+ * Global variables
+ */
+
+extern int color_table[][3];
--- /dev/null
+/*
+ * dialog.h -- common declarations for all dialog modules
+ *
+ * AUTHOR: Savio Lam (lam836@cs.cuhk.hk)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <sys/types.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef __sun__
+#define CURS_MACROS
+#endif
+#include CURSES_LOC
+
+/*
+ * Colors in ncurses 1.9.9e do not work properly since foreground and
+ * background colors are OR'd rather than separately masked. This version
+ * of dialog was hacked to work with ncurses 1.9.9e, making it incompatible
+ * with standard curses. The simplest fix (to make this work with standard
+ * curses) uses the wbkgdset() function, not used in the original hack.
+ * Turn it off if we're building with 1.9.9e, since it just confuses things.
+ */
+#if defined(NCURSES_VERSION) && defined(_NEED_WRAP) && !defined(GCC_PRINTFLIKE)
+#define OLD_NCURSES 1
+#undef wbkgdset
+#define wbkgdset(w,p) /*nothing */
+#else
+#define OLD_NCURSES 0
+#endif
+
+#define TR(params) _tracef params
+
+#define ESC 27
+#define TAB 9
+#define MAX_LEN 2048
+#define BUF_SIZE (10*1024)
+#define MIN(x,y) ((x) < (y) ? (x) : (y))
+#define MAX(x,y) ((x) > (y) ? (x) : (y))
+
+#ifndef ACS_ULCORNER
+#define ACS_ULCORNER '+'
+#endif
+#ifndef ACS_LLCORNER
+#define ACS_LLCORNER '+'
+#endif
+#ifndef ACS_URCORNER
+#define ACS_URCORNER '+'
+#endif
+#ifndef ACS_LRCORNER
+#define ACS_LRCORNER '+'
+#endif
+#ifndef ACS_HLINE
+#define ACS_HLINE '-'
+#endif
+#ifndef ACS_VLINE
+#define ACS_VLINE '|'
+#endif
+#ifndef ACS_LTEE
+#define ACS_LTEE '+'
+#endif
+#ifndef ACS_RTEE
+#define ACS_RTEE '+'
+#endif
+#ifndef ACS_UARROW
+#define ACS_UARROW '^'
+#endif
+#ifndef ACS_DARROW
+#define ACS_DARROW 'v'
+#endif
+
+/*
+ * Attribute names
+ */
+#define screen_attr attributes[0]
+#define shadow_attr attributes[1]
+#define dialog_attr attributes[2]
+#define title_attr attributes[3]
+#define border_attr attributes[4]
+#define button_active_attr attributes[5]
+#define button_inactive_attr attributes[6]
+#define button_key_active_attr attributes[7]
+#define button_key_inactive_attr attributes[8]
+#define button_label_active_attr attributes[9]
+#define button_label_inactive_attr attributes[10]
+#define inputbox_attr attributes[11]
+#define inputbox_border_attr attributes[12]
+#define searchbox_attr attributes[13]
+#define searchbox_title_attr attributes[14]
+#define searchbox_border_attr attributes[15]
+#define position_indicator_attr attributes[16]
+#define menubox_attr attributes[17]
+#define menubox_border_attr attributes[18]
+#define item_attr attributes[19]
+#define item_selected_attr attributes[20]
+#define tag_attr attributes[21]
+#define tag_selected_attr attributes[22]
+#define tag_key_attr attributes[23]
+#define tag_key_selected_attr attributes[24]
+#define check_attr attributes[25]
+#define check_selected_attr attributes[26]
+#define uarrow_attr attributes[27]
+#define darrow_attr attributes[28]
+
+/* number of attributes */
+#define ATTRIBUTE_COUNT 29
+
+/*
+ * Global variables
+ */
+extern bool use_colors;
+extern bool use_shadow;
+
+extern chtype attributes[];
+
+extern const char *backtitle;
+
+/*
+ * Function prototypes
+ */
+extern void create_rc(const char *filename);
+extern int parse_rc(void);
+
+void init_dialog(void);
+void end_dialog(void);
+void attr_clear(WINDOW * win, int height, int width, chtype attr);
+void dialog_clear(void);
+void color_setup(void);
+void print_autowrap(WINDOW * win, const char *prompt, int width, int y, int x);
+void print_button(WINDOW * win, const char *label, int y, int x, int selected);
+void print_title(WINDOW *dialog, const char *title, int width);
+void draw_box(WINDOW * win, int y, int x, int height, int width, chtype box,
+ chtype border);
+void draw_shadow(WINDOW * win, int y, int x, int height, int width);
+
+int first_alpha(const char *string, const char *exempt);
+int dialog_yesno(const char *title, const char *prompt, int height, int width);
+int dialog_msgbox(const char *title, const char *prompt, int height,
+ int width, int pause);
+int dialog_textbox(const char *title, const char *file, int height, int width);
+int dialog_menu(const char *title, const char *prompt, int height, int width,
+ int menu_height, const char *choice, int item_no,
+ const char *const *items);
+int dialog_checklist(const char *title, const char *prompt, int height,
+ int width, int list_height, int item_no,
+ const char *const *items);
+extern char dialog_input_result[];
+int dialog_inputbox(const char *title, const char *prompt, int height,
+ int width, const char *init);
+
+/*
+ * This is the base for fictitious keys, which activate
+ * the buttons.
+ *
+ * Mouse-generated keys are the following:
+ * -- the first 32 are used as numbers, in addition to '0'-'9'
+ * -- lowercase letters are used to signal mouse-enter events (M_EVENT + 'o')
+ * -- uppercase chars are used to invoke the button (M_EVENT + 'O')
+ */
+#define M_EVENT (KEY_MAX+1)
--- /dev/null
+/*
+ * inputbox.c -- implements the input box
+ *
+ * ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk)
+ * MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcap@cfw.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "dialog.h"
+
+char dialog_input_result[MAX_LEN + 1];
+
+/*
+ * Print the termination buttons
+ */
+static void print_buttons(WINDOW * dialog, int height, int width, int selected)
+{
+ int x = width / 2 - 11;
+ int y = height - 2;
+
+ print_button(dialog, " Ok ", y, x, selected == 0);
+ print_button(dialog, " Help ", y, x + 14, selected == 1);
+
+ wmove(dialog, y, x + 1 + 14 * selected);
+ wrefresh(dialog);
+}
+
+/*
+ * Display a dialog box for inputting a string
+ */
+int dialog_inputbox(const char *title, const char *prompt, int height, int width,
+ const char *init)
+{
+ int i, x, y, box_y, box_x, box_width;
+ int input_x = 0, scroll = 0, key = 0, button = -1;
+ char *instr = dialog_input_result;
+ WINDOW *dialog;
+
+ /* center dialog box on screen */
+ x = (COLS - width) / 2;
+ y = (LINES - height) / 2;
+
+ draw_shadow(stdscr, y, x, height, width);
+
+ dialog = newwin(height, width, y, x);
+ keypad(dialog, TRUE);
+
+ draw_box(dialog, 0, 0, height, width, dialog_attr, border_attr);
+ wattrset(dialog, border_attr);
+ mvwaddch(dialog, height - 3, 0, ACS_LTEE);
+ for (i = 0; i < width - 2; i++)
+ waddch(dialog, ACS_HLINE);
+ wattrset(dialog, dialog_attr);
+ waddch(dialog, ACS_RTEE);
+
+ print_title(dialog, title, width);
+
+ wattrset(dialog, dialog_attr);
+ print_autowrap(dialog, prompt, width - 2, 1, 3);
+
+ /* Draw the input field box */
+ box_width = width - 6;
+ getyx(dialog, y, x);
+ box_y = y + 2;
+ box_x = (width - box_width) / 2;
+ draw_box(dialog, y + 1, box_x - 1, 3, box_width + 2, border_attr, dialog_attr);
+
+ print_buttons(dialog, height, width, 0);
+
+ /* Set up the initial value */
+ wmove(dialog, box_y, box_x);
+ wattrset(dialog, inputbox_attr);
+
+ if (!init)
+ instr[0] = '\0';
+ else
+ strcpy(instr, init);
+
+ input_x = strlen(instr);
+
+ if (input_x >= box_width) {
+ scroll = input_x - box_width + 1;
+ input_x = box_width - 1;
+ for (i = 0; i < box_width - 1; i++)
+ waddch(dialog, instr[scroll + i]);
+ } else {
+ waddstr(dialog, instr);
+ }
+
+ wmove(dialog, box_y, box_x + input_x);
+
+ wrefresh(dialog);
+
+ while (key != ESC) {
+ key = wgetch(dialog);
+
+ if (button == -1) { /* Input box selected */
+ switch (key) {
+ case TAB:
+ case KEY_UP:
+ case KEY_DOWN:
+ break;
+ case KEY_LEFT:
+ continue;
+ case KEY_RIGHT:
+ continue;
+ case KEY_BACKSPACE:
+ case 127:
+ if (input_x || scroll) {
+ wattrset(dialog, inputbox_attr);
+ if (!input_x) {
+ scroll = scroll < box_width - 1 ? 0 : scroll - (box_width - 1);
+ wmove(dialog, box_y, box_x);
+ for (i = 0; i < box_width; i++)
+ waddch(dialog,
+ instr[scroll + input_x + i] ?
+ instr[scroll + input_x + i] : ' ');
+ input_x = strlen(instr) - scroll;
+ } else
+ input_x--;
+ instr[scroll + input_x] = '\0';
+ mvwaddch(dialog, box_y, input_x + box_x, ' ');
+ wmove(dialog, box_y, input_x + box_x);
+ wrefresh(dialog);
+ }
+ continue;
+ default:
+ if (key < 0x100 && isprint(key)) {
+ if (scroll + input_x < MAX_LEN) {
+ wattrset(dialog, inputbox_attr);
+ instr[scroll + input_x] = key;
+ instr[scroll + input_x + 1] = '\0';
+ if (input_x == box_width - 1) {
+ scroll++;
+ wmove(dialog, box_y, box_x);
+ for (i = 0; i < box_width - 1; i++)
+ waddch(dialog, instr [scroll + i]);
+ } else {
+ wmove(dialog, box_y, input_x++ + box_x);
+ waddch(dialog, key);
+ }
+ wrefresh(dialog);
+ } else
+ flash(); /* Alert the user about the overflow */
+ continue;
+ }
+ }
+ }
+ switch (key) {
+ case 'O':
+ case 'o':
+ delwin(dialog);
+ return 0;
+ case 'H':
+ case 'h':
+ delwin(dialog);
+ return 1;
+ case KEY_UP:
+ case KEY_LEFT:
+ switch (button) {
+ case -1:
+ button = 1; /* Indicates "Cancel" button is selected */
+ print_buttons(dialog, height, width, 1);
+ break;
+ case 0:
+ button = -1; /* Indicates input box is selected */
+ print_buttons(dialog, height, width, 0);
+ wmove(dialog, box_y, box_x + input_x);
+ wrefresh(dialog);
+ break;
+ case 1:
+ button = 0; /* Indicates "OK" button is selected */
+ print_buttons(dialog, height, width, 0);
+ break;
+ }
+ break;
+ case TAB:
+ case KEY_DOWN:
+ case KEY_RIGHT:
+ switch (button) {
+ case -1:
+ button = 0; /* Indicates "OK" button is selected */
+ print_buttons(dialog, height, width, 0);
+ break;
+ case 0:
+ button = 1; /* Indicates "Cancel" button is selected */
+ print_buttons(dialog, height, width, 1);
+ break;
+ case 1:
+ button = -1; /* Indicates input box is selected */
+ print_buttons(dialog, height, width, 0);
+ wmove(dialog, box_y, box_x + input_x);
+ wrefresh(dialog);
+ break;
+ }
+ break;
+ case ' ':
+ case '\n':
+ delwin(dialog);
+ return (button == -1 ? 0 : button);
+ case 'X':
+ case 'x':
+ key = ESC;
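+ /* fall through */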
+ case ESC:
+ break;
+ }
+ }
+
+ delwin(dialog);
+ return -1; /* ESC pressed */
+}
--- /dev/null
+/*
+ * dialog - Display simple dialog boxes from shell scripts
+ *
+ * ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk)
+ * MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcap@cfw.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "dialog.h"
+
+static void Usage(const char *name);
+
+typedef int (jumperFn) (const char *title, int argc, const char *const *argv);
+
+struct Mode {
+ char *name;
+ int argmin, argmax, argmod;
+ jumperFn *jumper;
+};
+
+jumperFn j_menu, j_radiolist, j_yesno, j_textbox, j_inputbox;
+jumperFn j_msgbox, j_infobox;
+
+static struct Mode modes[] = {
+ {"--menu", 9, 0, 3, j_menu},
+ {"--radiolist", 9, 0, 3, j_radiolist},
+ {"--yesno", 5, 5, 1, j_yesno},
+ {"--textbox", 5, 5, 1, j_textbox},
+ {"--inputbox", 5, 6, 1, j_inputbox},
+ {"--msgbox", 5, 5, 1, j_msgbox},
+ {"--infobox", 5, 5, 1, j_infobox},
+ {NULL, 0, 0, 0, NULL}
+};
+
+static struct Mode *modePtr;
+
+#ifdef LOCALE
+#include <locale.h>
+#endif
+
+int main(int argc, const char *const *argv)
+{
+ int offset = 0, opt_clear = 0, end_common_opts = 0, retval;
+ const char *title = NULL;
+
+#ifdef LOCALE
+ (void)setlocale(LC_ALL, "");
+#endif
+
+#ifdef TRACE
+ trace(TRACE_CALLS | TRACE_UPDATE);
+#endif
+ if (argc < 2) {
+ Usage(argv[0]);
+ exit(-1);
+ }
+
+ while (offset < argc - 1 && !end_common_opts) { /* Common options */
+ if (!strcmp(argv[offset + 1], "--title")) {
+ if (argc - offset < 3 || title != NULL) {
+ Usage(argv[0]);
+ exit(-1);
+ } else {
+ title = argv[offset + 2];
+ offset += 2;
+ }
+ } else if (!strcmp(argv[offset + 1], "--backtitle")) {
+ if (backtitle != NULL) {
+ Usage(argv[0]);
+ exit(-1);
+ } else {
+ backtitle = argv[offset + 2];
+ offset += 2;
+ }
+ } else if (!strcmp(argv[offset + 1], "--clear")) {
+ if (opt_clear) { /* Hey, "--clear" can't appear twice! */
+ Usage(argv[0]);
+ exit(-1);
+ } else if (argc == 2) { /* we only want to clear the screen */
+ init_dialog();
+ refresh(); /* init_dialog() will clear the screen for us */
+ end_dialog();
+ return 0;
+ } else {
+ opt_clear = 1;
+ offset++;
+ }
+ } else /* no more common options */
+ end_common_opts = 1;
+ }
+
+ if (argc - 1 == offset) { /* no more options */
+ Usage(argv[0]);
+ exit(-1);
+ }
+ /* use a table to look for the requested mode, to avoid code duplication */
+
+ for (modePtr = modes; modePtr->name; modePtr++) /* look for the mode */
+ if (!strcmp(argv[offset + 1], modePtr->name))
+ break;
+
+ if (!modePtr->name)
+ Usage(argv[0]);
+ if (argc - offset < modePtr->argmin)
+ Usage(argv[0]);
+ if (modePtr->argmax && argc - offset > modePtr->argmax)
+ Usage(argv[0]);
+
+ init_dialog();
+ retval = (*(modePtr->jumper)) (title, argc - offset, argv + offset);
+
+ if (opt_clear) { /* clear screen before exit */
+ attr_clear(stdscr, LINES, COLS, screen_attr);
+ refresh();
+ }
+ end_dialog();
+
+ exit(retval);
+}
+
+/*
+ * Print program usage
+ */
+static void Usage(const char *name)
+{
+ fprintf(stderr, "\
+\ndialog, by Savio Lam (lam836@cs.cuhk.hk).\
+\n patched by Stuart Herbert (S.Herbert@shef.ac.uk)\
+\n modified/gutted for use as a Linux kernel config tool by \
+\n William Roadcap (roadcapw@cfw.com)\
+\n\
+\n* Display dialog boxes from shell scripts *\
+\n\
+\nUsage: %s --clear\
+\n %s [--title <title>] [--backtitle <backtitle>] --clear <Box options>\
+\n\
+\nBox options:\
+\n\
+\n --menu <text> <height> <width> <menu height> <tag1> <item1>...\
+\n --radiolist <text> <height> <width> <list height> <tag1> <item1> <status1>...\
+\n --textbox <file> <height> <width>\
+\n --inputbox <text> <height> <width> [<init>]\
+\n --yesno <text> <height> <width>\
+\n", name, name);
+ exit(-1);
+}
+
+/*
+ * These are the program jumpers
+ */
+
+int j_menu(const char *t, int ac, const char *const *av)
+{
+ return dialog_menu(t, av[2], atoi(av[3]), atoi(av[4]),
+ atoi(av[5]), av[6], (ac - 6) / 2, av + 7);
+}
+
+int j_radiolist(const char *t, int ac, const char *const *av)
+{
+ return dialog_checklist(t, av[2], atoi(av[3]), atoi(av[4]),
+ atoi(av[5]), (ac - 6) / 3, av + 6);
+}
+
+int j_textbox(const char *t, int ac, const char *const *av)
+{
+ return dialog_textbox(t, av[2], atoi(av[3]), atoi(av[4]));
+}
+
+int j_yesno(const char *t, int ac, const char *const *av)
+{
+ return dialog_yesno(t, av[2], atoi(av[3]), atoi(av[4]));
+}
+
+int j_inputbox(const char *t, int ac, const char *const *av)
+{
+ int ret = dialog_inputbox(t, av[2], atoi(av[3]), atoi(av[4]),
+ ac == 6 ? av[5] : (char *)NULL);
+ if (ret == 0)
+ fprintf(stderr, "%s", dialog_input_result);
+ return ret;
+}
+
+int j_msgbox(const char *t, int ac, const char *const *av)
+{
+ return dialog_msgbox(t, av[2], atoi(av[3]), atoi(av[4]), 1);
+}
+
+int j_infobox(const char *t, int ac, const char *const *av)
+{
+ return dialog_msgbox(t, av[2], atoi(av[3]), atoi(av[4]), 0);
+}
--- /dev/null
+/*
+ * menubox.c -- implements the menu box
+ *
+ * ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk)
+ * MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcapw@cfw.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Changes by Clifford Wolf (god@clifford.at)
+ *
+ * [ 1998-06-13 ]
+ *
+ * *) A bugfix for the Page-Down problem
+ *
+ * *) Formerly when I used Page Down and Page Up, the cursor would be set
+ * to the first position in the menu box. Now lxdialog is a bit
+ * smarter and works more like other menu systems (just have a look at
+ * it).
+ *
+ * *) Formerly if I selected something my scrolling would be broken because
+ * lxdialog is re-invoked by the Menuconfig shell script, can't
+ * remember the last scrolling position, and just sets it so that the
+ * cursor is at the bottom of the box. Now it writes the temporary file
+ * lxdialog.scrltmp which contains this information. The file is
+ * deleted by lxdialog if the user leaves a submenu or enters a new
+ * one, but it would be nice if Menuconfig could make another "rm -f"
+ * just to be sure. Just try it out - you will recognise a difference!
+ *
+ * [ 1998-06-14 ]
+ *
+ * *) Now lxdialog is crash-safe against broken "lxdialog.scrltmp" files
+ * and menus change their size on the fly.
+ *
+ * *) If for some reason the last scrolling position is not saved by
+ * lxdialog, it sets the scrolling so that the selected item is in the
+ * middle of the menu box, not at the bottom.
+ *
+ * 02 January 1999, Michael Elizabeth Chastain (mec@shout.net)
+ * Reset 'scroll' to 0 if the value from lxdialog.scrltmp is bogus.
+ * This fixes a bug in Menuconfig where using ' ' to descend into menus
+ * would leave mis-synchronized lxdialog.scrltmp files lying around,
+ * fscanf would read in 'scroll', and eventually that value would get used.
+ */
+
+#include "dialog.h"
+
+static int menu_width, item_x;
+
+/*
+ * Print menu item
+ */
+static void do_print_item(WINDOW * win, const char *item, int choice,
+ int selected, int hotkey)
+{
+ int j;
+ char *menu_item = malloc(menu_width + 1);
+
+ strncpy(menu_item, item, menu_width - item_x);
+ menu_item[menu_width - item_x] = '\0';
+ j = first_alpha(menu_item, "YyNnMmHh");
+
+ /* Clear 'residue' of last item */
+ wattrset(win, menubox_attr);
+ wmove(win, choice, 0);
+#if OLD_NCURSES
+ {
+ int i;
+ for (i = 0; i < menu_width; i++)
+ waddch(win, ' ');
+ }
+#else
+ wclrtoeol(win);
+#endif
+ wattrset(win, selected ? item_selected_attr : item_attr);
+ mvwaddstr(win, choice, item_x, menu_item);
+ if (hotkey) {
+ wattrset(win, selected ? tag_key_selected_attr : tag_key_attr);
+ mvwaddch(win, choice, item_x + j, menu_item[j]);
+ }
+ if (selected) {
+ wmove(win, choice, item_x + 1);
+ }
+ free(menu_item);
+ wrefresh(win);
+}
+
+#define print_item(index, choice, selected) \
+do {\
+ int hotkey = (items[(index) * 2][0] != ':'); \
+ do_print_item(menu, items[(index) * 2 + 1], choice, selected, hotkey); \
+} while (0)
+
+/*
+ * Print the scroll indicators.
+ */
+static void print_arrows(WINDOW * win, int item_no, int scroll, int y, int x,
+ int height)
+{
+ int cur_y, cur_x;
+
+ getyx(win, cur_y, cur_x);
+
+ wmove(win, y, x);
+
+ if (scroll > 0) {
+ wattrset(win, uarrow_attr);
+ waddch(win, ACS_UARROW);
+ waddstr(win, "(-)");
+ } else {
+ wattrset(win, menubox_attr);
+ waddch(win, ACS_HLINE);
+ waddch(win, ACS_HLINE);
+ waddch(win, ACS_HLINE);
+ waddch(win, ACS_HLINE);
+ }
+
+ y = y + height + 1;
+ wmove(win, y, x);
+ wrefresh(win);
+
+ if ((height < item_no) && (scroll + height < item_no)) {
+ wattrset(win, darrow_attr);
+ waddch(win, ACS_DARROW);
+ waddstr(win, "(+)");
+ } else {
+ wattrset(win, menubox_border_attr);
+ waddch(win, ACS_HLINE);
+ waddch(win, ACS_HLINE);
+ waddch(win, ACS_HLINE);
+ waddch(win, ACS_HLINE);
+ }
+
+ wmove(win, cur_y, cur_x);
+ wrefresh(win);
+}
+
+/*
+ * Display the termination buttons.
+ */
+static void print_buttons(WINDOW * win, int height, int width, int selected)
+{
+ int x = width / 2 - 16;
+ int y = height - 2;
+
+ print_button(win, "Select", y, x, selected == 0);
+ print_button(win, " Exit ", y, x + 12, selected == 1);
+ print_button(win, " Help ", y, x + 24, selected == 2);
+
+ wmove(win, y, x + 1 + 12 * selected);
+ wrefresh(win);
+}
+
+/* scroll up n lines (n may be negative) */
+static void do_scroll(WINDOW *win, int *scroll, int n)
+{
+ /* Scroll menu up */
+ scrollok(win, TRUE);
+ wscrl(win, n);
+ scrollok(win, FALSE);
+ *scroll = *scroll + n;
+ wrefresh(win);
+}
+
+/*
+ * Display a menu for choosing among a number of options
+ */
+int dialog_menu(const char *title, const char *prompt, int height, int width,
+ int menu_height, const char *current, int item_no,
+ const char *const *items)
+{
+ int i, j, x, y, box_x, box_y;
+ int key = 0, button = 0, scroll = 0, choice = 0;
+ int first_item = 0, max_choice;
+ WINDOW *dialog, *menu;
+ FILE *f;
+
+ max_choice = MIN(menu_height, item_no);
+
+ /* center dialog box on screen */
+ x = (COLS - width) / 2;
+ y = (LINES - height) / 2;
+
+ draw_shadow(stdscr, y, x, height, width);
+
+ dialog = newwin(height, width, y, x);
+ keypad(dialog, TRUE);
+
+ draw_box(dialog, 0, 0, height, width, dialog_attr, border_attr);
+ wattrset(dialog, border_attr);
+ mvwaddch(dialog, height - 3, 0, ACS_LTEE);
+ for (i = 0; i < width - 2; i++)
+ waddch(dialog, ACS_HLINE);
+ wattrset(dialog, dialog_attr);
+ wbkgdset(dialog, dialog_attr & A_COLOR);
+ waddch(dialog, ACS_RTEE);
+
+ print_title(dialog, title, width);
+
+ wattrset(dialog, dialog_attr);
+ print_autowrap(dialog, prompt, width - 2, 1, 3);
+
+ menu_width = width - 6;
+ box_y = height - menu_height - 5;
+ box_x = (width - menu_width) / 2 - 1;
+
+ /* create new window for the menu */
+ menu = subwin(dialog, menu_height, menu_width,
+ y + box_y + 1, x + box_x + 1);
+ keypad(menu, TRUE);
+
+ /* draw a box around the menu items */
+ draw_box(dialog, box_y, box_x, menu_height + 2, menu_width + 2,
+ menubox_border_attr, menubox_attr);
+
+ item_x = (menu_width - 70) / 2;
+
+ /* Set choice to default item */
+ for (i = 0; i < item_no; i++)
+ if (strcmp(current, items[i * 2]) == 0)
+ choice = i;
+
+ /* get the scroll info from the temp file */
+ if ((f = fopen("lxdialog.scrltmp", "r")) != NULL) {
+ if ((fscanf(f, "%d\n", &scroll) == 1) && (scroll <= choice) &&
+ (scroll + max_choice > choice) && (scroll >= 0) &&
+ (scroll + max_choice <= item_no)) {
+ first_item = scroll;
+ choice = choice - scroll;
+ fclose(f);
+ } else {
+ scroll = 0;
+ remove("lxdialog.scrltmp");
+ fclose(f);
+ f = NULL;
+ }
+ }
+ if ((choice >= max_choice) || (f == NULL && choice >= max_choice / 2)) {
+ if (choice >= item_no - max_choice / 2)
+ scroll = first_item = item_no - max_choice;
+ else
+ scroll = first_item = choice - max_choice / 2;
+ choice = choice - scroll;
+ }
+
+ /* Print the menu */
+ for (i = 0; i < max_choice; i++) {
+ print_item(first_item + i, i, i == choice);
+ }
+
+ wnoutrefresh(menu);
+
+ print_arrows(dialog, item_no, scroll,
+ box_y, box_x + item_x + 1, menu_height);
+
+ print_buttons(dialog, height, width, 0);
+ wmove(menu, choice, item_x + 1);
+ wrefresh(menu);
+
+ while (key != ESC) {
+ key = wgetch(menu);
+
+ if (key < 256 && isalpha(key))
+ key = tolower(key);
+
+ if (strchr("ynmh", key))
+ i = max_choice;
+ else {
+ for (i = choice + 1; i < max_choice; i++) {
+ j = first_alpha(items[(scroll + i) * 2 + 1], "YyNnMmHh");
+ if (key == tolower(items[(scroll + i) * 2 + 1][j]))
+ break;
+ }
+ if (i == max_choice)
+ for (i = 0; i < max_choice; i++) {
+ j = first_alpha(items [(scroll + i) * 2 + 1], "YyNnMmHh");
+ if (key == tolower(items[(scroll + i) * 2 + 1][j]))
+ break;
+ }
+ }
+
+ if (i < max_choice ||
+ key == KEY_UP || key == KEY_DOWN ||
+ key == '-' || key == '+' ||
+ key == KEY_PPAGE || key == KEY_NPAGE) {
+ /* Remove highlight of current item */
+ print_item(scroll + choice, choice, FALSE);
+
+ if (key == KEY_UP || key == '-') {
+ if (choice < 2 && scroll) {
+ /* Scroll menu down */
+ do_scroll(menu, &scroll, -1);
+
+ print_item(scroll, 0, FALSE);
+ } else
+ choice = MAX(choice - 1, 0);
+
+ } else if (key == KEY_DOWN || key == '+') {
+ print_item(scroll+choice, choice, FALSE);
+
+ if ((choice > max_choice - 3) &&
+ (scroll + max_choice < item_no)) {
+ /* Scroll menu up */
+ do_scroll(menu, &scroll, 1);
+
+ print_item(scroll+max_choice - 1,
+ max_choice - 1, FALSE);
+ } else
+ choice = MIN(choice + 1, max_choice - 1);
+
+ } else if (key == KEY_PPAGE) {
+ scrollok(menu, TRUE);
+ for (i = 0; (i < max_choice); i++) {
+ if (scroll > 0) {
+ do_scroll(menu, &scroll, -1);
+ print_item(scroll, 0, FALSE);
+ } else {
+ if (choice > 0)
+ choice--;
+ }
+ }
+
+ } else if (key == KEY_NPAGE) {
+ for (i = 0; (i < max_choice); i++) {
+ if (scroll + max_choice < item_no) {
+ do_scroll(menu, &scroll, 1);
+ print_item(scroll+max_choice-1,
+ max_choice - 1, FALSE);
+ } else {
+ if (choice + 1 < max_choice)
+ choice++;
+ }
+ }
+ } else
+ choice = i;
+
+ print_item(scroll + choice, choice, TRUE);
+
+ print_arrows(dialog, item_no, scroll,
+ box_y, box_x + item_x + 1, menu_height);
+
+ wnoutrefresh(dialog);
+ wrefresh(menu);
+
+ continue; /* wait for another key press */
+ }
+
+ switch (key) {
+ case KEY_LEFT:
+ case TAB:
+ case KEY_RIGHT:
+ button = ((key == KEY_LEFT ? --button : ++button) < 0)
+ ? 2 : (button > 2 ? 0 : button);
+
+ print_buttons(dialog, height, width, button);
+ wrefresh(menu);
+ break;
+ case ' ':
+ case 's':
+ case 'y':
+ case 'n':
+ case 'm':
+ case '/':
+ /* save scroll info */
+ if ((f = fopen("lxdialog.scrltmp", "w")) != NULL) {
+ fprintf(f, "%d\n", scroll);
+ fclose(f);
+ }
+ delwin(dialog);
+ fprintf(stderr, "%s\n", items[(scroll + choice) * 2]);
+ switch (key) {
+ case 's':
+ return 3;
+ case 'y':
+ return 3;
+ case 'n':
+ return 4;
+ case 'm':
+ return 5;
+ case ' ':
+ return 6;
+ case '/':
+ return 7;
+ }
+ return 0;
+ case 'h':
+ case '?':
+ button = 2;
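+ /* fall through */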
+ case '\n':
+ delwin(dialog);
+ if (button == 2)
+ fprintf(stderr, "%s \"%s\"\n",
+ items[(scroll + choice) * 2],
+ items[(scroll + choice) * 2 + 1] +
+ first_alpha(items [(scroll + choice) * 2 + 1], ""));
+ else
+ fprintf(stderr, "%s\n",
+ items[(scroll + choice) * 2]);
+
+ remove("lxdialog.scrltmp");
+ return button;
+ case 'e':
+ case 'x':
+ key = ESC;
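+ /* fall through */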
+ case ESC:
+ break;
+ }
+ }
+
+ delwin(dialog);
+ remove("lxdialog.scrltmp");
+ return -1; /* ESC pressed */
+}
--- /dev/null
+/*
+ * msgbox.c -- implements the message box and info box
+ *
+ * ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk)
+ * MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcapw@cfw.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "dialog.h"
+
+/*
+ * Display a message box. Program will pause and display an "OK" button
+ * if the parameter 'pause' is non-zero.
+ */
+int dialog_msgbox(const char *title, const char *prompt, int height, int width,
+ int pause)
+{
+ int i, x, y, key = 0;
+ WINDOW *dialog;
+
+ /* center dialog box on screen */
+ x = (COLS - width) / 2;
+ y = (LINES - height) / 2;
+
+ draw_shadow(stdscr, y, x, height, width);
+
+ dialog = newwin(height, width, y, x);
+ keypad(dialog, TRUE);
+
+ draw_box(dialog, 0, 0, height, width, dialog_attr, border_attr);
+
+ print_title(dialog, title, width);
+
+ wattrset(dialog, dialog_attr);
+ print_autowrap(dialog, prompt, width - 2, 1, 2);
+
+ if (pause) {
+ wattrset(dialog, border_attr);
+ mvwaddch(dialog, height - 3, 0, ACS_LTEE);
+ for (i = 0; i < width - 2; i++)
+ waddch(dialog, ACS_HLINE);
+ wattrset(dialog, dialog_attr);
+ waddch(dialog, ACS_RTEE);
+
+ print_button(dialog, " Ok ", height - 2, width / 2 - 4, TRUE);
+
+ wrefresh(dialog);
+ while (key != ESC && key != '\n' && key != ' ' &&
+ key != 'O' && key != 'o' && key != 'X' && key != 'x')
+ key = wgetch(dialog);
+ } else {
+ key = '\n';
+ wrefresh(dialog);
+ }
+
+ delwin(dialog);
+ return key == ESC ? -1 : 0;
+}
--- /dev/null
+/*
+ * textbox.c -- implements the text box
+ *
+ * ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk)
+ * MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcap@cfw.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "dialog.h"
+
+static void back_lines(int n);
+static void print_page(WINDOW * win, int height, int width);
+static void print_line(WINDOW * win, int row, int width);
+static char *get_line(void);
+static void print_position(WINDOW * win, int height, int width);
+
+static int hscroll, fd, file_size, bytes_read;
+static int begin_reached = 1, end_reached, page_length;
+static char *buf, *page;
+
+/*
+ * Display text from a file in a dialog box.
+ */
+int dialog_textbox(const char *title, const char *file, int height, int width)
+{
+ int i, x, y, cur_x, cur_y, fpos, key = 0;
+ int passed_end;
+ char search_term[MAX_LEN + 1];
+ WINDOW *dialog, *text;
+
+ search_term[0] = '\0'; /* no search term entered yet */
+
+ /* Open input file for reading */
+ if ((fd = open(file, O_RDONLY)) == -1) {
+ endwin();
+ fprintf(stderr, "\nCan't open input file in dialog_textbox().\n");
+ exit(-1);
+ }
+ /* Get the file size: seeking to SEEK_END returns the offset of the
+ end of the file, i.e. the file size in bytes */
+ if ((file_size = lseek(fd, 0, SEEK_END)) == -1) {
+ endwin();
+ fprintf(stderr, "\nError getting file size in dialog_textbox().\n");
+ exit(-1);
+ }
+ /* Restore file pointer to beginning of file after getting file size */
+ if (lseek(fd, 0, SEEK_SET) == -1) {
+ endwin();
+ fprintf(stderr, "\nError moving file pointer in dialog_textbox().\n");
+ exit(-1);
+ }
+ /* Allocate space for read buffer */
+ if ((buf = malloc(BUF_SIZE + 1)) == NULL) {
+ endwin();
+ fprintf(stderr, "\nCan't allocate memory in dialog_textbox().\n");
+ exit(-1);
+ }
+ if ((bytes_read = read(fd, buf, BUF_SIZE)) == -1) {
+ endwin();
+ fprintf(stderr, "\nError reading file in dialog_textbox().\n");
+ exit(-1);
+ }
+ buf[bytes_read] = '\0'; /* mark end of valid data */
+ page = buf; /* page is pointer to start of page to be displayed */
+
+ /* center dialog box on screen */
+ x = (COLS - width) / 2;
+ y = (LINES - height) / 2;
+
+ draw_shadow(stdscr, y, x, height, width);
+
+ dialog = newwin(height, width, y, x);
+ keypad(dialog, TRUE);
+
+ /* Create window for text region, used for scrolling text */
+ text = subwin(dialog, height - 4, width - 2, y + 1, x + 1);
+ wattrset(text, dialog_attr);
+ wbkgdset(text, dialog_attr & A_COLOR);
+
+ keypad(text, TRUE);
+
+ /* register the new window, along with its borders */
+ draw_box(dialog, 0, 0, height, width, dialog_attr, border_attr);
+
+ wattrset(dialog, border_attr);
+ mvwaddch(dialog, height - 3, 0, ACS_LTEE);
+ for (i = 0; i < width - 2; i++)
+ waddch(dialog, ACS_HLINE);
+ wattrset(dialog, dialog_attr);
+ wbkgdset(dialog, dialog_attr & A_COLOR);
+ waddch(dialog, ACS_RTEE);
+
+ print_title(dialog, title, width);
+
+ print_button(dialog, " Exit ", height - 2, width / 2 - 4, TRUE);
+ wnoutrefresh(dialog);
+ getyx(dialog, cur_y, cur_x); /* Save cursor position */
+
+ /* Print first page of text */
+ attr_clear(text, height - 4, width - 2, dialog_attr);
+ print_page(text, height - 4, width - 2);
+ print_position(dialog, height, width);
+ wmove(dialog, cur_y, cur_x); /* Restore cursor position */
+ wrefresh(dialog);
+
+ while ((key != ESC) && (key != '\n')) {
+ key = wgetch(dialog);
+ switch (key) {
+ case 'E': /* Exit */
+ case 'e':
+ case 'X':
+ case 'x':
+ delwin(dialog);
+ free(buf);
+ close(fd);
+ return 0;
+ case 'g': /* First page */
+ case KEY_HOME:
+ if (!begin_reached) {
+ begin_reached = 1;
+ /* First page not in buffer? */
+ if ((fpos = lseek(fd, 0, SEEK_CUR)) == -1) {
+ endwin();
+ fprintf(stderr, "\nError moving file pointer in dialog_textbox().\n");
+ exit(-1);
+ }
+ if (fpos > bytes_read) { /* Yes, we have to read it in */
+ if (lseek(fd, 0, SEEK_SET) == -1) {
+ endwin();
+ fprintf(stderr, "\nError moving file pointer in "
+ "dialog_textbox().\n");
+ exit(-1);
+ }
+ if ((bytes_read =
+ read(fd, buf, BUF_SIZE)) == -1) {
+ endwin();
+ fprintf(stderr, "\nError reading file in dialog_textbox().\n");
+ exit(-1);
+ }
+ buf[bytes_read] = '\0';
+ }
+ page = buf;
+ print_page(text, height - 4, width - 2);
+ print_position(dialog, height, width);
+ wmove(dialog, cur_y, cur_x); /* Restore cursor position */
+ wrefresh(dialog);
+ }
+ break;
+ case 'G': /* Last page */
+ case KEY_END:
+
+ end_reached = 1;
+ /* Last page not in buffer? */
+ if ((fpos = lseek(fd, 0, SEEK_CUR)) == -1) {
+ endwin();
+ fprintf(stderr, "\nError moving file pointer in dialog_textbox().\n");
+ exit(-1);
+ }
+ if (fpos < file_size) { /* Yes, we have to read it in */
+ if (lseek(fd, -BUF_SIZE, SEEK_END) == -1) {
+ endwin();
+ fprintf(stderr, "\nError moving file pointer in dialog_textbox().\n");
+ exit(-1);
+ }
+ if ((bytes_read =
+ read(fd, buf, BUF_SIZE)) == -1) {
+ endwin();
+ fprintf(stderr, "\nError reading file in dialog_textbox().\n");
+ exit(-1);
+ }
+ buf[bytes_read] = '\0';
+ }
+ page = buf + bytes_read;
+ back_lines(height - 4);
+ print_page(text, height - 4, width - 2);
+ print_position(dialog, height, width);
+ wmove(dialog, cur_y, cur_x); /* Restore cursor position */
+ wrefresh(dialog);
+ break;
+ case 'K': /* Previous line */
+ case 'k':
+ case KEY_UP:
+ if (!begin_reached) {
+ back_lines(page_length + 1);
+
+ /* We don't call print_page() here but use scrolling to ensure
+ faster screen update. However, 'end_reached' and
+ 'page_length' should still be updated, and 'page' should
+ point to start of next page. This is done by calling
+ get_line() in the following 'for' loop. */
+ scrollok(text, TRUE);
+ wscrl(text, -1); /* Scroll text region down one line */
+ scrollok(text, FALSE);
+ page_length = 0;
+ passed_end = 0;
+ for (i = 0; i < height - 4; i++) {
+ if (!i) {
+ /* print first line of page */
+ print_line(text, 0, width - 2);
+ wnoutrefresh(text);
+ } else
+ /* Called to update 'end_reached' and 'page' */
+ get_line();
+ if (!passed_end)
+ page_length++;
+ if (end_reached && !passed_end)
+ passed_end = 1;
+ }
+
+ print_position(dialog, height, width);
+ wmove(dialog, cur_y, cur_x); /* Restore cursor position */
+ wrefresh(dialog);
+ }
+ break;
+ case 'B': /* Previous page */
+ case 'b':
+ case KEY_PPAGE:
+ if (begin_reached)
+ break;
+ back_lines(page_length + height - 4);
+ print_page(text, height - 4, width - 2);
+ print_position(dialog, height, width);
+ wmove(dialog, cur_y, cur_x);
+ wrefresh(dialog);
+ break;
+ case 'J': /* Next line */
+ case 'j':
+ case KEY_DOWN:
+ if (!end_reached) {
+ begin_reached = 0;
+ scrollok(text, TRUE);
+ scroll(text); /* Scroll text region up one line */
+ scrollok(text, FALSE);
+ print_line(text, height - 5, width - 2);
+ wnoutrefresh(text);
+ print_position(dialog, height, width);
+ wmove(dialog, cur_y, cur_x); /* Restore cursor position */
+ wrefresh(dialog);
+ }
+ break;
+ case KEY_NPAGE: /* Next page */
+ case ' ':
+ if (end_reached)
+ break;
+
+ begin_reached = 0;
+ print_page(text, height - 4, width - 2);
+ print_position(dialog, height, width);
+ wmove(dialog, cur_y, cur_x);
+ wrefresh(dialog);
+ break;
+ case '0': /* Beginning of line */
+ case 'H': /* Scroll left */
+ case 'h':
+ case KEY_LEFT:
+ if (hscroll <= 0)
+ break;
+
+ if (key == '0')
+ hscroll = 0;
+ else
+ hscroll--;
+ /* Reprint current page to scroll horizontally */
+ back_lines(page_length);
+ print_page(text, height - 4, width - 2);
+ wmove(dialog, cur_y, cur_x);
+ wrefresh(dialog);
+ break;
+ case 'L': /* Scroll right */
+ case 'l':
+ case KEY_RIGHT:
+ if (hscroll >= MAX_LEN)
+ break;
+ hscroll++;
+ /* Reprint current page to scroll horizontally */
+ back_lines(page_length);
+ print_page(text, height - 4, width - 2);
+ wmove(dialog, cur_y, cur_x);
+ wrefresh(dialog);
+ break;
+ case ESC:
+ break;
+ }
+ }
+
+ delwin(dialog);
+ free(buf);
+ close(fd);
+ return -1; /* ESC pressed */
+}
+
+/*
+ * Go back 'n' lines in text file. Called by dialog_textbox().
+ * 'page' will be updated to point to the desired line in 'buf'.
+ */
+static void back_lines(int n)
+{
+ int i, fpos;
+
+ begin_reached = 0;
+ /* We have to distinguish between end_reached and !end_reached
+ since at end of file, the line is not ended by a '\n'.
+ The code inside 'if' basically does a '--page' to move one
+ character backward so as to skip '\n' of the previous line */
+ if (!end_reached) {
+ /* Either beginning of buffer or beginning of file reached? */
+ if (page == buf) {
+ if ((fpos = lseek(fd, 0, SEEK_CUR)) == -1) {
+ endwin();
+ fprintf(stderr, "\nError moving file pointer in "
+ "back_lines().\n");
+ exit(-1);
+ }
+ if (fpos > bytes_read) { /* Not beginning of file yet */
+ /* We've reached beginning of buffer, but not beginning of
+ file yet, so read previous part of file into buffer.
+ Note that we only move backward for BUF_SIZE/2 bytes,
+ but not BUF_SIZE bytes to avoid re-reading again in
+ print_page() later */
+ /* Really possible to move backward BUF_SIZE/2 bytes? */
+ if (fpos < BUF_SIZE / 2 + bytes_read) {
+ /* No, move less than BUF_SIZE/2 */
+ if (lseek(fd, 0, SEEK_SET) == -1) {
+ endwin();
+ fprintf(stderr, "\nError moving file pointer in "
+ "back_lines().\n");
+ exit(-1);
+ }
+ page = buf + fpos - bytes_read;
+ } else { /* Move backward BUF_SIZE/2 bytes */
+ if (lseek (fd, -(BUF_SIZE / 2 + bytes_read), SEEK_CUR) == -1) {
+ endwin();
+ fprintf(stderr, "\nError moving file pointer "
+ "in back_lines().\n");
+ exit(-1);
+ }
+ page = buf + BUF_SIZE / 2;
+ }
+ if ((bytes_read =
+ read(fd, buf, BUF_SIZE)) == -1) {
+ endwin();
+ fprintf(stderr, "\nError reading file in back_lines().\n");
+ exit(-1);
+ }
+ buf[bytes_read] = '\0';
+ } else { /* Beginning of file reached */
+ begin_reached = 1;
+ return;
+ }
+ }
+ if (*(--page) != '\n') { /* '--page' here */
+ /* Something's wrong... */
+ endwin();
+ fprintf(stderr, "\nInternal error in back_lines().\n");
+ exit(-1);
+ }
+ }
+ /* Go back 'n' lines */
+ for (i = 0; i < n; i++)
+ do {
+ if (page == buf) {
+ if ((fpos = lseek(fd, 0, SEEK_CUR)) == -1) {
+ endwin();
+ fprintf(stderr, "\nError moving file pointer in back_lines().\n");
+ exit(-1);
+ }
+ if (fpos > bytes_read) {
+ /* Really possible to move backward BUF_SIZE/2 bytes? */
+ if (fpos < BUF_SIZE / 2 + bytes_read) {
+ /* No, move less than BUF_SIZE/2 */
+ if (lseek(fd, 0, SEEK_SET) == -1) {
+ endwin();
+ fprintf(stderr, "\nError moving file pointer "
+ "in back_lines().\n");
+ exit(-1);
+ }
+ page = buf + fpos - bytes_read;
+ } else { /* Move backward BUF_SIZE/2 bytes */
+ if (lseek (fd, -(BUF_SIZE / 2 + bytes_read), SEEK_CUR) == -1) {
+ endwin();
+ fprintf(stderr, "\nError moving file pointer"
+ " in back_lines().\n");
+ exit(-1);
+ }
+ page = buf + BUF_SIZE / 2;
+ }
+ if ((bytes_read =
+ read(fd, buf, BUF_SIZE)) == -1) {
+ endwin();
+ fprintf(stderr, "\nError reading file in "
+ "back_lines().\n");
+ exit(-1);
+ }
+ buf[bytes_read] = '\0';
+ } else { /* Beginning of file reached */
+ begin_reached = 1;
+ return;
+ }
+ }
+ } while (*(--page) != '\n');
+ page++;
+}
+
+/*
+ * Print a new page of text. Called by dialog_textbox().
+ */
+static void print_page(WINDOW * win, int height, int width)
+{
+ int i, passed_end = 0;
+
+ page_length = 0;
+ for (i = 0; i < height; i++) {
+ print_line(win, i, width);
+ if (!passed_end)
+ page_length++;
+ if (end_reached && !passed_end)
+ passed_end = 1;
+ }
+ wnoutrefresh(win);
+}
+
+/*
+ * Print a new line of text. Called by dialog_textbox() and print_page().
+ */
+static void print_line(WINDOW * win, int row, int width)
+{
+ int y, x;
+ char *line;
+
+ line = get_line();
+ line += MIN(strlen(line), hscroll); /* Scroll horizontally */
+ wmove(win, row, 0); /* move cursor to correct line */
+ waddch(win, ' ');
+ waddnstr(win, line, MIN(strlen(line), width - 2));
+
+ getyx(win, y, x);
+ /* Clear 'residue' of previous line */
+#if OLD_NCURSES
+ {
+ int i;
+ for (i = 0; i < width - x; i++)
+ waddch(win, ' ');
+ }
+#else
+ wclrtoeol(win);
+#endif
+}
+
+/*
+ * Return current line of text. Called by dialog_textbox() and print_line().
+ * 'page' should point to start of current line before calling, and will be
+ * updated to point to start of next line.
+ */
+static char *get_line(void)
+{
+ int i = 0, fpos;
+ static char line[MAX_LEN + 1];
+
+ end_reached = 0;
+ while (*page != '\n') {
+ if (*page == '\0') {
+ /* Either end of file or end of buffer reached */
+ if ((fpos = lseek(fd, 0, SEEK_CUR)) == -1) {
+ endwin();
+ fprintf(stderr, "\nError moving file pointer in "
+ "get_line().\n");
+ exit(-1);
+ }
+ if (fpos < file_size) { /* Not end of file yet */
+ /* We've reached end of buffer, but not end of file yet,
+ so read next part of file into buffer */
+ if ((bytes_read =
+ read(fd, buf, BUF_SIZE)) == -1) {
+ endwin();
+ fprintf(stderr, "\nError reading file in get_line().\n");
+ exit(-1);
+ }
+ buf[bytes_read] = '\0';
+ page = buf;
+ } else {
+ if (!end_reached)
+ end_reached = 1;
+ break;
+ }
+ } else if (i < MAX_LEN)
+ line[i++] = *(page++);
+ else {
+ /* Truncate lines longer than MAX_LEN characters */
+ if (i == MAX_LEN)
+ line[i++] = '\0';
+ page++;
+ }
+ }
+ if (i <= MAX_LEN)
+ line[i] = '\0';
+ if (!end_reached)
+ page++; /* move past '\n' */
+
+ return line;
+}
+
+/*
+ * Print current position
+ */
+static void print_position(WINDOW * win, int height, int width)
+{
+ int fpos, percent;
+
+ if ((fpos = lseek(fd, 0, SEEK_CUR)) == -1) {
+ endwin();
+ fprintf(stderr, "\nError moving file pointer in print_position().\n");
+ exit(-1);
+ }
+ wattrset(win, position_indicator_attr);
+ wbkgdset(win, position_indicator_attr & A_COLOR);
+ percent = !file_size ?
+ 100 : ((fpos - bytes_read + page - buf) * 100) / file_size;
+ wmove(win, height - 3, width - 9);
+ wprintw(win, "(%3d%%)", percent);
+}
--- /dev/null
+/*
+ * util.c
+ *
+ * ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk)
+ * MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcap@cfw.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "dialog.h"
+
+/* use colors by default? */
+bool use_colors = 1;
+
+const char *backtitle = NULL;
+
+/*
+ * Attribute values, default is for mono display
+ */
+chtype attributes[] = {
+ A_NORMAL, /* screen_attr */
+ A_NORMAL, /* shadow_attr */
+ A_NORMAL, /* dialog_attr */
+ A_BOLD, /* title_attr */
+ A_NORMAL, /* border_attr */
+ A_REVERSE, /* button_active_attr */
+ A_DIM, /* button_inactive_attr */
+ A_REVERSE, /* button_key_active_attr */
+ A_BOLD, /* button_key_inactive_attr */
+ A_REVERSE, /* button_label_active_attr */
+ A_NORMAL, /* button_label_inactive_attr */
+ A_NORMAL, /* inputbox_attr */
+ A_NORMAL, /* inputbox_border_attr */
+ A_NORMAL, /* searchbox_attr */
+ A_BOLD, /* searchbox_title_attr */
+ A_NORMAL, /* searchbox_border_attr */
+ A_BOLD, /* position_indicator_attr */
+ A_NORMAL, /* menubox_attr */
+ A_NORMAL, /* menubox_border_attr */
+ A_NORMAL, /* item_attr */
+ A_REVERSE, /* item_selected_attr */
+ A_BOLD, /* tag_attr */
+ A_REVERSE, /* tag_selected_attr */
+ A_BOLD, /* tag_key_attr */
+ A_REVERSE, /* tag_key_selected_attr */
+ A_BOLD, /* check_attr */
+ A_REVERSE, /* check_selected_attr */
+ A_BOLD, /* uarrow_attr */
+ A_BOLD /* darrow_attr */
+};
+
+#include "colors.h"
+
+/*
+ * Table of color values
+ */
+int color_table[][3] = {
+ {SCREEN_FG, SCREEN_BG, SCREEN_HL},
+ {SHADOW_FG, SHADOW_BG, SHADOW_HL},
+ {DIALOG_FG, DIALOG_BG, DIALOG_HL},
+ {TITLE_FG, TITLE_BG, TITLE_HL},
+ {BORDER_FG, BORDER_BG, BORDER_HL},
+ {BUTTON_ACTIVE_FG, BUTTON_ACTIVE_BG, BUTTON_ACTIVE_HL},
+ {BUTTON_INACTIVE_FG, BUTTON_INACTIVE_BG, BUTTON_INACTIVE_HL},
+ {BUTTON_KEY_ACTIVE_FG, BUTTON_KEY_ACTIVE_BG, BUTTON_KEY_ACTIVE_HL},
+ {BUTTON_KEY_INACTIVE_FG, BUTTON_KEY_INACTIVE_BG,
+ BUTTON_KEY_INACTIVE_HL},
+ {BUTTON_LABEL_ACTIVE_FG, BUTTON_LABEL_ACTIVE_BG,
+ BUTTON_LABEL_ACTIVE_HL},
+ {BUTTON_LABEL_INACTIVE_FG, BUTTON_LABEL_INACTIVE_BG,
+ BUTTON_LABEL_INACTIVE_HL},
+ {INPUTBOX_FG, INPUTBOX_BG, INPUTBOX_HL},
+ {INPUTBOX_BORDER_FG, INPUTBOX_BORDER_BG, INPUTBOX_BORDER_HL},
+ {SEARCHBOX_FG, SEARCHBOX_BG, SEARCHBOX_HL},
+ {SEARCHBOX_TITLE_FG, SEARCHBOX_TITLE_BG, SEARCHBOX_TITLE_HL},
+ {SEARCHBOX_BORDER_FG, SEARCHBOX_BORDER_BG, SEARCHBOX_BORDER_HL},
+ {POSITION_INDICATOR_FG, POSITION_INDICATOR_BG, POSITION_INDICATOR_HL},
+ {MENUBOX_FG, MENUBOX_BG, MENUBOX_HL},
+ {MENUBOX_BORDER_FG, MENUBOX_BORDER_BG, MENUBOX_BORDER_HL},
+ {ITEM_FG, ITEM_BG, ITEM_HL},
+ {ITEM_SELECTED_FG, ITEM_SELECTED_BG, ITEM_SELECTED_HL},
+ {TAG_FG, TAG_BG, TAG_HL},
+ {TAG_SELECTED_FG, TAG_SELECTED_BG, TAG_SELECTED_HL},
+ {TAG_KEY_FG, TAG_KEY_BG, TAG_KEY_HL},
+ {TAG_KEY_SELECTED_FG, TAG_KEY_SELECTED_BG, TAG_KEY_SELECTED_HL},
+ {CHECK_FG, CHECK_BG, CHECK_HL},
+ {CHECK_SELECTED_FG, CHECK_SELECTED_BG, CHECK_SELECTED_HL},
+ {UARROW_FG, UARROW_BG, UARROW_HL},
+ {DARROW_FG, DARROW_BG, DARROW_HL},
+}; /* color_table */
+
+/*
+ * Fill window with blanks using attribute 'attr'
+ */
+void attr_clear(WINDOW * win, int height, int width, chtype attr)
+{
+ int i, j;
+
+ wattrset(win, attr);
+ for (i = 0; i < height; i++) {
+ wmove(win, i, 0);
+ for (j = 0; j < width; j++)
+ waddch(win, ' ');
+ }
+ touchwin(win);
+}
+
+void dialog_clear(void)
+{
+ attr_clear(stdscr, LINES, COLS, screen_attr);
+ /* Display background title if it exists ... - SLH */
+ if (backtitle != NULL) {
+ int i;
+
+ wattrset(stdscr, screen_attr);
+ mvwaddstr(stdscr, 0, 1, (char *)backtitle);
+ wmove(stdscr, 1, 1);
+ for (i = 1; i < COLS - 1; i++)
+ waddch(stdscr, ACS_HLINE);
+ }
+ wnoutrefresh(stdscr);
+}
+
+/*
+ * Do some initialization for dialog
+ */
+void init_dialog(void)
+{
+ initscr(); /* Init curses */
+ keypad(stdscr, TRUE);
+ cbreak();
+ noecho();
+
+ if (use_colors) /* Set up colors */
+ color_setup();
+
+ dialog_clear();
+}
+
+/*
+ * Setup for color display
+ */
+void color_setup(void)
+{
+ int i;
+
+ if (has_colors()) { /* Terminal supports color? */
+ start_color();
+
+ /* Initialize color pairs */
+ for (i = 0; i < ATTRIBUTE_COUNT; i++)
+ init_pair(i + 1, color_table[i][0], color_table[i][1]);
+
+ /* Setup color attributes */
+ for (i = 0; i < ATTRIBUTE_COUNT; i++)
+ attributes[i] = C_ATTR(color_table[i][2], i + 1);
+ }
+}
+
+/*
+ * End using dialog functions.
+ */
+void end_dialog(void)
+{
+ endwin();
+}
+
+/* Print the title of the dialog. Center the title and truncate
+ * the title if it is wider than the dialog (width - 2 chars).
+ */
+void print_title(WINDOW *dialog, const char *title, int width)
+{
+ if (title) {
+ int tlen = MIN(width - 2, strlen(title));
+ wattrset(dialog, title_attr);
+ mvwaddch(dialog, 0, (width - tlen) / 2 - 1, ' ');
+ mvwaddnstr(dialog, 0, (width - tlen)/2, title, tlen);
+ waddch(dialog, ' ');
+ }
+}
+
+/*
+ * Print a string of text in a window, automatically wrap around to the
+ * next line if the string is too long to fit on one line. Newline
+ * characters '\n' are replaced by spaces. We start on a new line
+ * if there is no room for at least 4 nonblanks following a double-space.
+ */
+void print_autowrap(WINDOW * win, const char *prompt, int width, int y, int x)
+{
+ int newl, cur_x, cur_y;
+ int i, prompt_len, room, wlen;
+ char tempstr[MAX_LEN + 1], *word, *sp, *sp2;
+
+ strcpy(tempstr, prompt);
+
+ prompt_len = strlen(tempstr);
+
+ /*
+ * Remove newlines
+ */
+ for (i = 0; i < prompt_len; i++) {
+ if (tempstr[i] == '\n')
+ tempstr[i] = ' ';
+ }
+
+ if (prompt_len <= width - x * 2) { /* If prompt is short */
+ wmove(win, y, (width - prompt_len) / 2);
+ waddstr(win, tempstr);
+ } else {
+ cur_x = x;
+ cur_y = y;
+ newl = 1;
+ word = tempstr;
+ while (word && *word) {
+ sp = index(word, ' ');
+ if (sp)
+ *sp++ = 0;
+
+ /* Wrap to next line if either the word does not fit,
+ or it is the first word of a new sentence, and it is
+ short, and the next word does not fit. */
+ room = width - cur_x;
+ wlen = strlen(word);
+ if (wlen > room ||
+ (newl && wlen < 4 && sp
+ && wlen + 1 + strlen(sp) > room
+ && (!(sp2 = index(sp, ' '))
+ || wlen + 1 + (sp2 - sp) > room))) {
+ cur_y++;
+ cur_x = x;
+ }
+ wmove(win, cur_y, cur_x);
+ waddstr(win, word);
+ getyx(win, cur_y, cur_x);
+ cur_x++;
+ if (sp && *sp == ' ') {
+ cur_x++; /* double space */
+ while (*++sp == ' ') ;
+ newl = 1;
+ } else
+ newl = 0;
+ word = sp;
+ }
+ }
+}
+
+/*
+ * Print a button
+ */
+void print_button(WINDOW * win, const char *label, int y, int x, int selected)
+{
+ int i, temp;
+
+ wmove(win, y, x);
+ wattrset(win, selected ? button_active_attr : button_inactive_attr);
+ waddstr(win, "<");
+ temp = strspn(label, " ");
+ label += temp;
+ wattrset(win, selected ? button_label_active_attr
+ : button_label_inactive_attr);
+ for (i = 0; i < temp; i++)
+ waddch(win, ' ');
+ wattrset(win, selected ? button_key_active_attr
+ : button_key_inactive_attr);
+ waddch(win, label[0]);
+ wattrset(win, selected ? button_label_active_attr
+ : button_label_inactive_attr);
+ waddstr(win, (char *)label + 1);
+ wattrset(win, selected ? button_active_attr : button_inactive_attr);
+ waddstr(win, ">");
+ wmove(win, y, x + temp + 1);
+}
+
+/*
+ * Draw a rectangular box with line drawing characters
+ */
+void
+draw_box(WINDOW * win, int y, int x, int height, int width,
+ chtype box, chtype border)
+{
+ int i, j;
+
+ wattrset(win, 0);
+ for (i = 0; i < height; i++) {
+ wmove(win, y + i, x);
+ for (j = 0; j < width; j++)
+ if (!i && !j)
+ waddch(win, border | ACS_ULCORNER);
+ else if (i == height - 1 && !j)
+ waddch(win, border | ACS_LLCORNER);
+ else if (!i && j == width - 1)
+ waddch(win, box | ACS_URCORNER);
+ else if (i == height - 1 && j == width - 1)
+ waddch(win, box | ACS_LRCORNER);
+ else if (!i)
+ waddch(win, border | ACS_HLINE);
+ else if (i == height - 1)
+ waddch(win, box | ACS_HLINE);
+ else if (!j)
+ waddch(win, border | ACS_VLINE);
+ else if (j == width - 1)
+ waddch(win, box | ACS_VLINE);
+ else
+ waddch(win, box | ' ');
+ }
+}
+
+/*
+ * Draw shadows along the right and bottom edge to give a more 3D look
+ * to the boxes
+ */
+void draw_shadow(WINDOW * win, int y, int x, int height, int width)
+{
+ int i;
+
+ if (has_colors()) { /* Does the terminal support color? */
+ wattrset(win, shadow_attr);
+ wmove(win, y + height, x + 2);
+ for (i = 0; i < width; i++)
+ waddch(win, winch(win) & A_CHARTEXT);
+ for (i = y + 1; i < y + height + 1; i++) {
+ wmove(win, i, x + width);
+ waddch(win, winch(win) & A_CHARTEXT);
+ waddch(win, winch(win) & A_CHARTEXT);
+ }
+ wnoutrefresh(win);
+ }
+}
+
+/*
+ * Return the position of the first alphabetic character in a string.
+ */
+int first_alpha(const char *string, const char *exempt)
+{
+ int i, in_paren = 0, c;
+
+ for (i = 0; i < strlen(string); i++) {
+ c = tolower(string[i]);
+
+ if (strchr("<[(", c))
+ ++in_paren;
+ if (strchr(">])", c) && in_paren > 0)
+ --in_paren;
+
+ if ((!in_paren) && isalpha(c) && strchr(exempt, c) == 0)
+ return i;
+ }
+
+ return 0;
+}
--- /dev/null
+/*
+ * yesno.c -- implements the yes/no box
+ *
+ * ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk)
+ * MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcap@cfw.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "dialog.h"
+
+/*
+ * Display termination buttons
+ */
+static void print_buttons(WINDOW * dialog, int height, int width, int selected)
+{
+ int x = width / 2 - 10;
+ int y = height - 2;
+
+ print_button(dialog, " Yes ", y, x, selected == 0);
+ print_button(dialog, " No ", y, x + 13, selected == 1);
+
+ wmove(dialog, y, x + 1 + 13 * selected);
+ wrefresh(dialog);
+}
+
+/*
+ * Display a dialog box with two buttons - Yes and No
+ */
+int dialog_yesno(const char *title, const char *prompt, int height, int width)
+{
+ int i, x, y, key = 0, button = 0;
+ WINDOW *dialog;
+
+ /* center dialog box on screen */
+ x = (COLS - width) / 2;
+ y = (LINES - height) / 2;
+
+ draw_shadow(stdscr, y, x, height, width);
+
+ dialog = newwin(height, width, y, x);
+ keypad(dialog, TRUE);
+
+ draw_box(dialog, 0, 0, height, width, dialog_attr, border_attr);
+ wattrset(dialog, border_attr);
+ mvwaddch(dialog, height - 3, 0, ACS_LTEE);
+ for (i = 0; i < width - 2; i++)
+ waddch(dialog, ACS_HLINE);
+ wattrset(dialog, dialog_attr);
+ waddch(dialog, ACS_RTEE);
+
+ print_title(dialog, title, width);
+
+ wattrset(dialog, dialog_attr);
+ print_autowrap(dialog, prompt, width - 2, 1, 3);
+
+ print_buttons(dialog, height, width, 0);
+
+ while (key != ESC) {
+ key = wgetch(dialog);
+ switch (key) {
+ case 'Y':
+ case 'y':
+ delwin(dialog);
+ return 0;
+ case 'N':
+ case 'n':
+ delwin(dialog);
+ return 1;
+
+ case TAB:
+ case KEY_LEFT:
+ case KEY_RIGHT:
+ button = ((key == KEY_LEFT ? --button : ++button) < 0) ? 1 : (button > 1 ? 0 : button);
+
+ print_buttons(dialog, height, width, button);
+ wrefresh(dialog);
+ break;
+ case ' ':
+ case '\n':
+ delwin(dialog);
+ return button;
+ case ESC:
+ break;
+ }
+ }
+
+ delwin(dialog);
+ return -1; /* ESC pressed */
+}
--- /dev/null
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ *
+ * Introduced single menu mode (show all sub-menus in one large tree).
+ * 2002-11-06 Petr Baudis <pasky@ucw.cz>
+ *
+ * i18n, 2005, Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ */
+
+#include <sys/ioctl.h>
+#include <sys/wait.h>
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <termios.h>
+#include <unistd.h>
+#include <locale.h>
+
+#define LKC_DIRECT_LINK
+#include "lkc.h"
+
+static char menu_backtitle[128];
+static const char mconf_readme[] = N_(
+"Overview\n"
+"--------\n"
+"Some kernel features may be built directly into the kernel.\n"
+"Some may be made into loadable runtime modules. Some features\n"
+"may be completely removed altogether. There are also certain\n"
+"kernel parameters which are not really features, but must be\n"
+"entered in as decimal or hexadecimal numbers or possibly text.\n"
+"\n"
+"Menu items beginning with [*], <M> or [ ] represent features\n"
+"configured to be built in, modularized or removed respectively.\n"
+"Pointed brackets <> represent module capable features.\n"
+"\n"
+"To change any of these features, highlight it with the cursor\n"
+"keys and press <Y> to build it in, <M> to make it a module or\n"
+"<N> to removed it. You may also press the <Space Bar> to cycle\n"
+"through the available options (ie. Y->N->M->Y).\n"
+"\n"
+"Some additional keyboard hints:\n"
+"\n"
+"Menus\n"
+"----------\n"
+"o Use the Up/Down arrow keys (cursor keys) to highlight the item\n"
+" you wish to change or submenu wish to select and press <Enter>.\n"
+" Submenus are designated by \"--->\".\n"
+"\n"
+" Shortcut: Press the option's highlighted letter (hotkey).\n"
+" Pressing a hotkey more than once will sequence\n"
+" through all visible items which use that hotkey.\n"
+"\n"
+" You may also use the <PAGE UP> and <PAGE DOWN> keys to scroll\n"
+" unseen options into view.\n"
+"\n"
+"o To exit a menu use the cursor keys to highlight the <Exit> button\n"
+" and press <ENTER>.\n"
+"\n"
+" Shortcut: Press <ESC><ESC> or <E> or <X> if there is no hotkey\n"
+" using those letters. You may press a single <ESC>, but\n"
+" there is a delayed response which you may find annoying.\n"
+"\n"
+" Also, the <TAB> and cursor keys will cycle between <Select>,\n"
+" <Exit> and <Help>\n"
+"\n"
+"o To get help with an item, use the cursor keys to highlight <Help>\n"
+" and Press <ENTER>.\n"
+"\n"
+" Shortcut: Press <H> or <?>.\n"
+"\n"
+"\n"
+"Radiolists (Choice lists)\n"
+"-----------\n"
+"o Use the cursor keys to select the option you wish to set and press\n"
+" <S> or the <SPACE BAR>.\n"
+"\n"
+" Shortcut: Press the first letter of the option you wish to set then\n"
+" press <S> or <SPACE BAR>.\n"
+"\n"
+"o To see available help for the item, use the cursor keys to highlight\n"
+" <Help> and Press <ENTER>.\n"
+"\n"
+" Shortcut: Press <H> or <?>.\n"
+"\n"
+" Also, the <TAB> and cursor keys will cycle between <Select> and\n"
+" <Help>\n"
+"\n"
+"\n"
+"Data Entry\n"
+"-----------\n"
+"o Enter the requested information and press <ENTER>\n"
+" If you are entering hexadecimal values, it is not necessary to\n"
+" add the '0x' prefix to the entry.\n"
+"\n"
+"o For help, use the <TAB> or cursor keys to highlight the help option\n"
+" and press <ENTER>. You can try <TAB><H> as well.\n"
+"\n"
+"\n"
+"Text Box (Help Window)\n"
+"--------\n"
+"o Use the cursor keys to scroll up/down/left/right. The VI editor\n"
+" keys h,j,k,l function here as do <SPACE BAR> and <B> for those\n"
+" who are familiar with less and lynx.\n"
+"\n"
+"o Press <E>, <X>, <Enter> or <Esc><Esc> to exit.\n"
+"\n"
+"\n"
+"Alternate Configuration Files\n"
+"-----------------------------\n"
+"Menuconfig supports the use of alternate configuration files for\n"
+"those who, for various reasons, find it necessary to switch\n"
+"between different kernel configurations.\n"
+"\n"
+"At the end of the main menu you will find two options. One is\n"
+"for saving the current configuration to a file of your choosing.\n"
+"The other option is for loading a previously saved alternate\n"
+"configuration.\n"
+"\n"
+"Even if you don't use alternate configuration files, but you\n"
+"find during a Menuconfig session that you have completely messed\n"
+"up your settings, you may use the \"Load Alternate...\" option to\n"
+"restore your previously saved settings from \".config\" without\n"
+"restarting Menuconfig.\n"
+"\n"
+"Other information\n"
+"-----------------\n"
+"If you use Menuconfig in an XTERM window make sure you have your\n"
+"$TERM variable set to point to a xterm definition which supports color.\n"
+"Otherwise, Menuconfig will look rather bad. Menuconfig will not\n"
+"display correctly in a RXVT window because rxvt displays only one\n"
+"intensity of color, bright.\n"
+"\n"
+"Menuconfig will display larger menus on screens or xterms which are\n"
+"set to display more than the standard 25 row by 80 column geometry.\n"
+"In order for this to work, the \"stty size\" command must be able to\n"
+"display the screen's current row and column geometry. I STRONGLY\n"
+"RECOMMEND that you make sure you do NOT have the shell variables\n"
+"LINES and COLUMNS exported into your environment. Some distributions\n"
+"export those variables via /etc/profile. Some ncurses programs can\n"
+"become confused when those variables (LINES & COLUMNS) don't reflect\n"
+"the true screen size.\n"
+"\n"
+"Optional personality available\n"
+"------------------------------\n"
+"If you prefer to have all of the kernel options listed in a single\n"
+"menu, rather than the default multimenu hierarchy, run the menuconfig\n"
+"with MENUCONFIG_MODE environment variable set to single_menu. Example:\n"
+"\n"
+"make MENUCONFIG_MODE=single_menu menuconfig\n"
+"\n"
+"<Enter> will then unroll the appropriate category, or enfold it if it\n"
+"is already unrolled.\n"
+"\n"
+"Note that this mode can eventually be a little more CPU expensive\n"
+"(especially with a larger number of unrolled categories) than the\n"
+"default mode.\n"),
+menu_instructions[] = N_(
+ "Arrow keys navigate the menu. "
+ "<Enter> selects submenus --->. "
+ "Highlighted letters are hotkeys. "
+ "Pressing <Y> includes, <N> excludes, <M> modularizes features. "
+ "Press <Esc><Esc> to exit, <?> for Help, </> for Search. "
+ "Legend: [*] built-in [ ] excluded <M> module < > module capable"),
+radiolist_instructions[] = N_(
+ "Use the arrow keys to navigate this window or "
+ "press the hotkey of the item you wish to select "
+ "followed by the <SPACE BAR>. "
+ "Press <?> for additional information about this option."),
+inputbox_instructions_int[] = N_(
+ "Please enter a decimal value. "
+ "Fractions will not be accepted. "
+ "Use the <TAB> key to move from the input field to the buttons below it."),
+inputbox_instructions_hex[] = N_(
+ "Please enter a hexadecimal value. "
+ "Use the <TAB> key to move from the input field to the buttons below it."),
+inputbox_instructions_string[] = N_(
+ "Please enter a string value. "
+ "Use the <TAB> key to move from the input field to the buttons below it."),
+setmod_text[] = N_(
+ "This feature depends on another which has been configured as a module.\n"
+ "As a result, this feature will be built as a module."),
+nohelp_text[] = N_(
+ "There is no help available for this kernel option.\n"),
+load_config_text[] = N_(
+ "Enter the name of the configuration file you wish to load. "
+ "Accept the name shown to restore the configuration you "
+ "last retrieved. Leave blank to abort."),
+load_config_help[] = N_(
+ "\n"
+ "For various reasons, one may wish to keep several different kernel\n"
+ "configurations available on a single machine.\n"
+ "\n"
+ "If you have saved a previous configuration in a file other than the\n"
+ "kernel's default, entering the name of the file here will allow you\n"
+ "to modify that configuration.\n"
+ "\n"
+ "If you are uncertain, then you have probably never used alternate\n"
+ "configuration files. You should therefor leave this blank to abort.\n"),
+save_config_text[] = N_(
+ "Enter a filename to which this configuration should be saved "
+ "as an alternate. Leave blank to abort."),
+save_config_help[] = N_(
+ "\n"
+ "For various reasons, one may wish to keep different kernel\n"
+ "configurations available on a single machine.\n"
+ "\n"
+ "Entering a file name here will allow you to later retrieve, modify\n"
+ "and use the current configuration as an alternate to whatever\n"
+ "configuration options you have selected at that time.\n"
+ "\n"
+ "If you are uncertain what all this means then you should probably\n"
+ "leave this blank.\n"),
+search_help[] = N_(
+ "\n"
+ "Search for CONFIG_ symbols and display their relations.\n"
+ "Regular expressions are allowed.\n"
+ "Example: search for \"^FOO\"\n"
+ "Result:\n"
+ "-----------------------------------------------------------------\n"
+ "Symbol: FOO [=m]\n"
+ "Prompt: Foo bus is used to drive the bar HW\n"
+ "Defined at drivers/pci/Kconfig:47\n"
+ "Depends on: X86_LOCAL_APIC && X86_IO_APIC || IA64\n"
+ "Location:\n"
+ " -> Bus options (PCI, PCMCIA, EISA, MCA, ISA)\n"
+ " -> PCI support (PCI [=y])\n"
+ " -> PCI access mode (<choice> [=y])\n"
+ "Selects: LIBCRC32\n"
+ "Selected by: BAR\n"
+ "-----------------------------------------------------------------\n"
+ "o The line 'Prompt:' shows the text used in the menu structure for\n"
+ " this CONFIG_ symbol\n"
+ "o The 'Defined at' line tell at what file / line number the symbol\n"
+ " is defined\n"
+ "o The 'Depends on:' line tell what symbols needs to be defined for\n"
+ " this symbol to be visible in the menu (selectable)\n"
+ "o The 'Location:' lines tell where in the menu structure this symbol\n"
+ " is located\n"
+ " A location followed by a [=y] indicate that this is a selectable\n"
+ " menu item - and current value is displayed inside brackets.\n"
+ "o The 'Selects:' line tell what symbol will be automatically\n"
+ " selected if this symbol is selected (y or m)\n"
+ "o The 'Selected by' line tell what symbol has selected this symbol\n"
+ "\n"
+ "Only relevant lines are shown.\n"
+ "\n\n"
+ "Search examples:\n"
+ "Examples: USB => find all CONFIG_ symbols containing USB\n"
+ " ^USB => find all CONFIG_ symbols starting with USB\n"
+ " USB$ => find all CONFIG_ symbols ending with USB\n"
+ "\n");
+
+static char buf[4096], *bufptr = buf;
+static char input_buf[4096];
+static char filename[PATH_MAX+1] = ".config";
+static char *args[1024], **argptr = args;
+static int indent;
+static struct termios ios_org;
+static int rows = 0, cols = 0;
+static struct menu *current_menu;
+static int child_count;
+static int do_resize;
+static int single_menu_mode;
+
+static void conf(struct menu *menu);
+static void conf_choice(struct menu *menu);
+static void conf_string(struct menu *menu);
+static void conf_load(void);
+static void conf_save(void);
+static void show_textbox(const char *title, const char *text, int r, int c);
+static void show_helptext(const char *title, const char *text);
+static void show_help(struct menu *menu);
+static void show_file(const char *filename, const char *title, int r, int c);
+
+static void cprint_init(void);
+static int cprint1(const char *fmt, ...);
+static void cprint_done(void);
+static int cprint(const char *fmt, ...);
+
+static void init_wsize(void)
+{
+ struct winsize ws;
+ char *env;
+
+ if (!ioctl(STDIN_FILENO, TIOCGWINSZ, &ws)) {
+ rows = ws.ws_row;
+ cols = ws.ws_col;
+ }
+
+ if (!rows) {
+ env = getenv("LINES");
+ if (env)
+ rows = atoi(env);
+ if (!rows)
+ rows = 24;
+ }
+ if (!cols) {
+ env = getenv("COLUMNS");
+ if (env)
+ cols = atoi(env);
+ if (!cols)
+ cols = 80;
+ }
+
+ if (rows < 19 || cols < 80) {
+ fprintf(stderr, N_("Your display is too small to run Menuconfig!\n"));
+ fprintf(stderr, N_("It must be at least 19 lines by 80 columns.\n"));
+ exit(1);
+ }
+
+ rows -= 4;
+ cols -= 5;
+}
+
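+/*
+ * The cprint*() helpers assemble the argv[] passed to the lxdialog child:
+ * 'buf' accumulates NUL-terminated argument strings and 'args' collects
+ * pointers to them. cprint() emits one complete argument, while cprint1()
+ * appends to the argument in progress and cprint_done() terminates it.
+ */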
+static void cprint_init(void)
+{
+ bufptr = buf;
+ argptr = args;
+ memset(args, 0, sizeof(args));
+ indent = 0;
+ child_count = 0;
+ cprint("./scripts/kconfig/lxdialog/lxdialog");
+ cprint("--backtitle");
+ cprint(menu_backtitle);
+}
+
+static int cprint1(const char *fmt, ...)
+{
+ va_list ap;
+ int res;
+
+ if (!*argptr)
+ *argptr = bufptr;
+ va_start(ap, fmt);
+ res = vsprintf(bufptr, fmt, ap);
+ va_end(ap);
+ bufptr += res;
+
+ return res;
+}
+
+static void cprint_done(void)
+{
+ *bufptr++ = 0;
+ argptr++;
+}
+
+static int cprint(const char *fmt, ...)
+{
+ va_list ap;
+ int res;
+
+ *argptr++ = bufptr;
+ va_start(ap, fmt);
+ res = vsprintf(bufptr, fmt, ap);
+ va_end(ap);
+ bufptr += res;
+ *bufptr++ = 0;
+
+ return res;
+}
+
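+/*
+ * Append a description of one prompt property (text, definition site,
+ * dependencies and menu location) to the growable string 'r'; used when
+ * formatting search results and help text.
+ */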
+static void get_prompt_str(struct gstr *r, struct property *prop)
+{
+ int i, j;
+ struct menu *submenu[8], *menu;
+
+ str_printf(r, "Prompt: %s\n", prop->text);
+ str_printf(r, " Defined at %s:%d\n", prop->menu->file->name,
+ prop->menu->lineno);
+ if (!expr_is_yes(prop->visible.expr)) {
+ str_append(r, " Depends on: ");
+ expr_gstr_print(prop->visible.expr, r);
+ str_append(r, "\n");
+ }
+ menu = prop->menu->parent;
+ for (i = 0; menu != &rootmenu && i < 8; menu = menu->parent)
+ submenu[i++] = menu;
+ if (i > 0) {
+ str_printf(r, " Location:\n");
+ for (j = 4; --i >= 0; j += 2) {
+ menu = submenu[i];
+ str_printf(r, "%*c-> %s", j, ' ', menu_get_prompt(menu));
+ if (menu->sym) {
+ str_printf(r, " (%s [=%s])", menu->sym->name ?
+ menu->sym->name : "<choice>",
+ sym_get_string_value(menu->sym));
+ }
+ str_append(r, "\n");
+ }
+ }
+}
+
+static void get_symbol_str(struct gstr *r, struct symbol *sym)
+{
+ bool hit;
+ struct property *prop;
+
+ str_printf(r, "Symbol: %s [=%s]\n", sym->name,
+ sym_get_string_value(sym));
+ for_all_prompts(sym, prop)
+ get_prompt_str(r, prop);
+ hit = false;
+ for_all_properties(sym, prop, P_SELECT) {
+ if (!hit) {
+ str_append(r, " Selects: ");
+ hit = true;
+ } else
+ str_printf(r, " && ");
+ expr_gstr_print(prop->expr, r);
+ }
+ if (hit)
+ str_append(r, "\n");
+ if (sym->rev_dep.expr) {
+ str_append(r, " Selected by: ");
+ expr_gstr_print(sym->rev_dep.expr, r);
+ str_append(r, "\n");
+ }
+ str_append(r, "\n\n");
+}
+
+static struct gstr get_relations_str(struct symbol **sym_arr)
+{
+ struct symbol *sym;
+ struct gstr res = str_new();
+ int i;
+
+ for (i = 0; sym_arr && (sym = sym_arr[i]); i++)
+ get_symbol_str(&res, sym);
+ if (!i)
+ str_append(&res, "No matches found.\n");
+ return res;
+}
+
+pid_t pid;
+
+static void winch_handler(int sig)
+{
+ if (!do_resize) {
+ kill(pid, SIGINT);
+ do_resize = 1;
+ }
+}
+
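+/*
+ * Run lxdialog with the argument list built by the cprint*() helpers.
+ * The child's stderr is captured through a pipe into 'input_buf' (this is
+ * how lxdialog reports the selected item), and the child's exit status is
+ * returned; -1 means the window was resized and the dialog must be rerun.
+ */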
+static int exec_conf(void)
+{
+ int pipefd[2], stat, size;
+ struct sigaction sa;
+ sigset_t sset, osset;
+
+ sigemptyset(&sset);
+ sigaddset(&sset, SIGINT);
+ sigprocmask(SIG_BLOCK, &sset, &osset);
+
+ signal(SIGINT, SIG_DFL);
+
+ sa.sa_handler = winch_handler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART;
+ sigaction(SIGWINCH, &sa, NULL);
+
+ *argptr++ = NULL;
+
+ pipe(pipefd);
+ pid = fork();
+ if (pid == 0) {
+ sigprocmask(SIG_SETMASK, &osset, NULL);
+ dup2(pipefd[1], 2);
+ close(pipefd[0]);
+ close(pipefd[1]);
+ execv(args[0], args);
+ _exit(EXIT_FAILURE);
+ }
+
+ close(pipefd[1]);
+ bufptr = input_buf;
+ while (1) {
+ size = input_buf + sizeof(input_buf) - bufptr;
+ size = read(pipefd[0], bufptr, size);
+ if (size <= 0) {
+ if (size < 0) {
+ if (errno == EINTR || errno == EAGAIN)
+ continue;
+ perror("read");
+ }
+ break;
+ }
+ bufptr += size;
+ }
+ *bufptr++ = 0;
+ close(pipefd[0]);
+ waitpid(pid, &stat, 0);
+
+ if (do_resize) {
+ init_wsize();
+ do_resize = 0;
+ sigprocmask(SIG_SETMASK, &osset, NULL);
+ return -1;
+ }
+ if (WIFSIGNALED(stat)) {
+ printf("\finterrupted(%d)\n", WTERMSIG(stat));
+ exit(1);
+ }
+#if 0
+ printf("\fexit state: %d\nexit data: '%s'\n", WEXITSTATUS(stat), input_buf);
+ sleep(1);
+#endif
+ sigpending(&sset);
+ if (sigismember(&sset, SIGINT)) {
+ printf("\finterrupted\n");
+ exit(1);
+ }
+ sigprocmask(SIG_SETMASK, &osset, NULL);
+
+ return WEXITSTATUS(stat);
+}
+
+static void search_conf(void)
+{
+ struct symbol **sym_arr;
+ int stat;
+ struct gstr res;
+
+again:
+ cprint_init();
+ cprint("--title");
+ cprint(_("Search Configuration Parameter"));
+ cprint("--inputbox");
+ cprint(_("Enter CONFIG_ (sub)string to search for (omit CONFIG_)"));
+ cprint("10");
+ cprint("75");
+ cprint("");
+ stat = exec_conf();
+ if (stat < 0)
+ goto again;
+ switch (stat) {
+ case 0:
+ break;
+ case 1:
+ show_helptext(_("Search Configuration"), search_help);
+ goto again;
+ default:
+ return;
+ }
+
+ sym_arr = sym_re_search(input_buf);
+ res = get_relations_str(sym_arr);
+ free(sym_arr);
+ show_textbox(_("Search Results"), str_get(&res), 0, 0);
+ str_free(&res);
+}
+
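+/*
+ * Emit one menu line per visible entry in 'menu', recursing into child
+ * lists. Each entry's tag starts with a type character ('m' submenu,
+ * 't' tristate/bool, 's' string/int/hex, ':' static text) followed by the
+ * menu pointer, which conf() later parses back out of input_buf.
+ */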
+static void build_conf(struct menu *menu)
+{
+ struct symbol *sym;
+ struct property *prop;
+ struct menu *child;
+ int type, tmp, doint = 2;
+ tristate val;
+ char ch;
+
+ if (!menu_is_visible(menu))
+ return;
+
+ sym = menu->sym;
+ prop = menu->prompt;
+ if (!sym) {
+ if (prop && menu != current_menu) {
+ const char *prompt = menu_get_prompt(menu);
+ switch (prop->type) {
+ case P_MENU:
+ child_count++;
+ cprint("m%p", menu);
+
+ if (single_menu_mode) {
+ cprint1("%s%*c%s",
+ menu->data ? "-->" : "++>",
+ indent + 1, ' ', prompt);
+ } else
+ cprint1(" %*c%s --->", indent + 1, ' ', prompt);
+
+ cprint_done();
+ if (single_menu_mode && menu->data)
+ goto conf_childs;
+ return;
+ default:
+ if (prompt) {
+ child_count++;
+ cprint(":%p", menu);
+ cprint("---%*c%s", indent + 1, ' ', prompt);
+ }
+ }
+ } else
+ doint = 0;
+ goto conf_childs;
+ }
+
+ type = sym_get_type(sym);
+ if (sym_is_choice(sym)) {
+ struct symbol *def_sym = sym_get_choice_value(sym);
+ struct menu *def_menu = NULL;
+
+ child_count++;
+ for (child = menu->list; child; child = child->next) {
+ if (menu_is_visible(child) && child->sym == def_sym)
+ def_menu = child;
+ }
+
+ val = sym_get_tristate_value(sym);
+ if (sym_is_changable(sym)) {
+ cprint("t%p", menu);
+ switch (type) {
+ case S_BOOLEAN:
+ cprint1("[%c]", val == no ? ' ' : '*');
+ break;
+ case S_TRISTATE:
+ switch (val) {
+ case yes: ch = '*'; break;
+ case mod: ch = 'M'; break;
+ default: ch = ' '; break;
+ }
+ cprint1("<%c>", ch);
+ break;
+ }
+ } else {
+ cprint("%c%p", def_menu ? 't' : ':', menu);
+ cprint1(" ");
+ }
+
+ cprint1("%*c%s", indent + 1, ' ', menu_get_prompt(menu));
+ if (val == yes) {
+ if (def_menu) {
+ cprint1(" (%s)", menu_get_prompt(def_menu));
+ cprint1(" --->");
+ cprint_done();
+ if (def_menu->list) {
+ indent += 2;
+ build_conf(def_menu);
+ indent -= 2;
+ }
+ } else
+ cprint_done();
+ return;
+ }
+ cprint_done();
+ } else {
+ if (menu == current_menu) {
+ cprint(":%p", menu);
+ cprint("---%*c%s", indent + 1, ' ', menu_get_prompt(menu));
+ goto conf_childs;
+ }
+ child_count++;
+ val = sym_get_tristate_value(sym);
+ if (sym_is_choice_value(sym) && val == yes) {
+ cprint(":%p", menu);
+ cprint1(" ");
+ } else {
+ switch (type) {
+ case S_BOOLEAN:
+ cprint("t%p", menu);
+ if (sym_is_changable(sym))
+ cprint1("[%c]", val == no ? ' ' : '*');
+ else
+ cprint1("---");
+ break;
+ case S_TRISTATE:
+ cprint("t%p", menu);
+ switch (val) {
+ case yes: ch = '*'; break;
+ case mod: ch = 'M'; break;
+ default: ch = ' '; break;
+ }
+ if (sym_is_changable(sym))
+ cprint1("<%c>", ch);
+ else
+ cprint1("---");
+ break;
+ default:
+ cprint("s%p", menu);
+ tmp = cprint1("(%s)", sym_get_string_value(sym));
+ tmp = indent - tmp + 4;
+ if (tmp < 0)
+ tmp = 0;
+ cprint1("%*c%s%s", tmp, ' ', menu_get_prompt(menu),
+ (sym_has_value(sym) || !sym_is_changable(sym)) ?
+ "" : " (NEW)");
+ cprint_done();
+ goto conf_childs;
+ }
+ }
+ cprint1("%*c%s%s", indent + 1, ' ', menu_get_prompt(menu),
+ (sym_has_value(sym) || !sym_is_changable(sym)) ?
+ "" : " (NEW)");
+ if (menu->prompt->type == P_MENU) {
+ cprint1(" --->");
+ cprint_done();
+ return;
+ }
+ cprint_done();
+ }
+
+conf_childs:
+ indent += doint;
+ for (child = menu->list; child; child = child->next)
+ build_conf(child);
+ indent -= doint;
+}
+
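+/*
+ * Main menu loop: rebuild the lxdialog command line for 'menu', run it,
+ * and dispatch on the exit status (0 = enter/select, 2 = help,
+ * 3/4/5 = set yes/no/module, 6 = toggle, 7 = search, 1/255 = exit).
+ */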
+static void conf(struct menu *menu)
+{
+ struct menu *submenu;
+ const char *prompt = menu_get_prompt(menu);
+ struct symbol *sym;
+ char active_entry[40];
+ int stat, type, i;
+
+ unlink("lxdialog.scrltmp");
+ active_entry[0] = 0;
+ while (1) {
+ cprint_init();
+ cprint("--title");
+ cprint("%s", prompt ? prompt : _("Main Menu"));
+ cprint("--menu");
+ cprint(_(menu_instructions));
+ cprint("%d", rows);
+ cprint("%d", cols);
+ cprint("%d", rows - 10);
+ cprint("%s", active_entry);
+ current_menu = menu;
+ build_conf(menu);
+ if (!child_count)
+ break;
+ if (menu == &rootmenu) {
+ cprint(":");
+ cprint("--- ");
+ cprint("L");
+ cprint(_(" Load an Alternate Configuration File"));
+ cprint("S");
+ cprint(_(" Save Configuration to an Alternate File"));
+ }
+ stat = exec_conf();
+ if (stat < 0)
+ continue;
+
+ if (stat == 1 || stat == 255)
+ break;
+
+ type = input_buf[0];
+ if (!type)
+ continue;
+
+ for (i = 0; input_buf[i] && !isspace(input_buf[i]); i++)
+ ;
+ if (i >= sizeof(active_entry))
+ i = sizeof(active_entry) - 1;
+ input_buf[i] = 0;
+ strcpy(active_entry, input_buf);
+
+ sym = NULL;
+ submenu = NULL;
+ if (sscanf(input_buf + 1, "%p", &submenu) == 1)
+ sym = submenu->sym;
+
+ switch (stat) {
+ case 0:
+ switch (type) {
+ case 'm':
+ if (single_menu_mode)
+ submenu->data = (void *) (long) !submenu->data;
+ else
+ conf(submenu);
+ break;
+ case 't':
+ if (sym_is_choice(sym) && sym_get_tristate_value(sym) == yes)
+ conf_choice(submenu);
+ else if (submenu->prompt->type == P_MENU)
+ conf(submenu);
+ break;
+ case 's':
+ conf_string(submenu);
+ break;
+ case 'L':
+ conf_load();
+ break;
+ case 'S':
+ conf_save();
+ break;
+ }
+ break;
+ case 2:
+ if (sym)
+ show_help(submenu);
+ else
+ show_helptext("README", _(mconf_readme));
+ break;
+ case 3:
+ if (type == 't') {
+ if (sym_set_tristate_value(sym, yes))
+ break;
+ if (sym_set_tristate_value(sym, mod))
+ show_textbox(NULL, setmod_text, 6, 74);
+ }
+ break;
+ case 4:
+ if (type == 't')
+ sym_set_tristate_value(sym, no);
+ break;
+ case 5:
+ if (type == 't')
+ sym_set_tristate_value(sym, mod);
+ break;
+ case 6:
+ if (type == 't')
+ sym_toggle_tristate_value(sym);
+ else if (type == 'm')
+ conf(submenu);
+ break;
+ case 7:
+ search_conf();
+ break;
+ }
+ }
+}
+
+static void show_textbox(const char *title, const char *text, int r, int c)
+{
+ int fd;
+
+ fd = creat(".help.tmp", 0777);
+ write(fd, text, strlen(text));
+ close(fd);
+ show_file(".help.tmp", title, r, c);
+ unlink(".help.tmp");
+}
+
+static void show_helptext(const char *title, const char *text)
+{
+ show_textbox(title, text, 0, 0);
+}
+
+static void show_help(struct menu *menu)
+{
+ struct gstr help = str_new();
+ struct symbol *sym = menu->sym;
+
+ if (sym->help)
+ {
+ if (sym->name) {
+ str_printf(&help, "CONFIG_%s:\n\n", sym->name);
+ str_append(&help, _(sym->help));
+ str_append(&help, "\n");
+ }
+ } else {
+ str_append(&help, nohelp_text);
+ }
+ get_symbol_str(&help, sym);
+ show_helptext(menu_get_prompt(menu), str_get(&help));
+ str_free(&help);
+}
+
+static void show_file(const char *filename, const char *title, int r, int c)
+{
+ do {
+ cprint_init();
+ if (title) {
+ cprint("--title");
+ cprint("%s", title);
+ }
+ cprint("--textbox");
+ cprint("%s", filename);
+ cprint("%d", r ? r : rows);
+ cprint("%d", c ? c : cols);
+ } while (exec_conf() < 0);
+}
+
+static void conf_choice(struct menu *menu)
+{
+ const char *prompt = menu_get_prompt(menu);
+ struct menu *child;
+ struct symbol *active;
+ int stat;
+
+ active = sym_get_choice_value(menu->sym);
+ while (1) {
+ cprint_init();
+ cprint("--title");
+ cprint("%s", prompt ? prompt : _("Main Menu"));
+ cprint("--radiolist");
+ cprint(_(radiolist_instructions));
+ cprint("15");
+ cprint("70");
+ cprint("6");
+
+ current_menu = menu;
+ for (child = menu->list; child; child = child->next) {
+ if (!menu_is_visible(child))
+ continue;
+ cprint("%p", child);
+ cprint("%s", menu_get_prompt(child));
+ if (child->sym == sym_get_choice_value(menu->sym))
+ cprint("ON");
+ else if (child->sym == active)
+ cprint("SELECTED");
+ else
+ cprint("OFF");
+ }
+
+ stat = exec_conf();
+ switch (stat) {
+ case 0:
+ if (sscanf(input_buf, "%p", &child) != 1)
+ break;
+ sym_set_tristate_value(child->sym, yes);
+ return;
+ case 1:
+ if (sscanf(input_buf, "%p", &child) == 1) {
+ show_help(child);
+ active = child->sym;
+ } else
+ show_help(menu);
+ break;
+ case 255:
+ return;
+ }
+ }
+}
+
+static void conf_string(struct menu *menu)
+{
+ const char *prompt = menu_get_prompt(menu);
+ int stat;
+
+ while (1) {
+ cprint_init();
+ cprint("--title");
+ cprint("%s", prompt ? prompt : _("Main Menu"));
+ cprint("--inputbox");
+ switch (sym_get_type(menu->sym)) {
+ case S_INT:
+ cprint(_(inputbox_instructions_int));
+ break;
+ case S_HEX:
+ cprint(_(inputbox_instructions_hex));
+ break;
+ case S_STRING:
+ cprint(_(inputbox_instructions_string));
+ break;
+ default:
+ /* panic? */;
+ }
+ cprint("10");
+ cprint("75");
+ cprint("%s", sym_get_string_value(menu->sym));
+ stat = exec_conf();
+ switch (stat) {
+ case 0:
+ if (sym_set_string_value(menu->sym, input_buf))
+ return;
+ show_textbox(NULL, _("You have made an invalid entry."), 5, 43);
+ break;
+ case 1:
+ show_help(menu);
+ break;
+ case 255:
+ return;
+ }
+ }
+}
+
+static void conf_load(void)
+{
+ int stat;
+
+ while (1) {
+ cprint_init();
+ cprint("--inputbox");
+ cprint(load_config_text);
+ cprint("11");
+ cprint("55");
+ cprint("%s", filename);
+ stat = exec_conf();
+ switch(stat) {
+ case 0:
+ if (!input_buf[0])
+ return;
+ if (!conf_read(input_buf))
+ return;
+ show_textbox(NULL, _("File does not exist!"), 5, 38);
+ break;
+ case 1:
+ show_helptext(_("Load Alternate Configuration"), load_config_help);
+ break;
+ case 255:
+ return;
+ }
+ }
+}
+
+static void conf_save(void)
+{
+ int stat;
+
+ while (1) {
+ cprint_init();
+ cprint("--inputbox");
+ cprint(save_config_text);
+ cprint("11");
+ cprint("55");
+ cprint("%s", filename);
+ stat = exec_conf();
+ switch(stat) {
+ case 0:
+ if (!input_buf[0])
+ return;
+ if (!conf_write(input_buf))
+ return;
+ show_textbox(NULL, _("Can't create file! Probably a nonexistent directory."), 5, 60);
+ break;
+ case 1:
+ show_helptext(_("Save Alternate Configuration"), save_config_help);
+ break;
+ case 255:
+ return;
+ }
+ }
+}
+
+static void conf_cleanup(void)
+{
+ tcsetattr(1, TCSAFLUSH, &ios_org);
+ unlink(".help.tmp");
+ unlink("lxdialog.scrltmp");
+}
+
+int main(int ac, char **av)
+{
+ struct symbol *sym;
+ char *mode;
+ int stat;
+
+ setlocale(LC_ALL, "");
+ bindtextdomain(PACKAGE, LOCALEDIR);
+ textdomain(PACKAGE);
+
+ conf_parse(av[1]);
+ conf_read(NULL);
+
+ sym = sym_lookup("KERNELVERSION", 0);
+ sym_calc_value(sym);
+ sprintf(menu_backtitle, _("LWK Kernel v%s Configuration"),
+ sym_get_string_value(sym));
+
+ mode = getenv("MENUCONFIG_MODE");
+ if (mode) {
+ if (!strcasecmp(mode, "single_menu"))
+ single_menu_mode = 1;
+ }
+
+ tcgetattr(1, &ios_org);
+ atexit(conf_cleanup);
+ init_wsize();
+ conf(&rootmenu);
+
+ do {
+ cprint_init();
+ cprint("--yesno");
+ cprint(_("Do you wish to save your new kernel configuration?"));
+ cprint("5");
+ cprint("60");
+ stat = exec_conf();
+ } while (stat < 0);
+
+ if (stat == 0) {
+ if (conf_write(NULL)) {
+ fprintf(stderr, _("\n\n"
+ "Error during writing of the kernel configuration.\n"
+ "Your kernel configuration changes were NOT saved."
+ "\n\n"));
+ return 1;
+ }
+ printf(_("\n\n"
+ "*** End of LWK kernel configuration.\n"
+ "*** Execute 'make' to build the kernel or try 'make help'."
+ "\n\n"));
+ } else {
+ fprintf(stderr, _("\n\n"
+ "Your kernel configuration changes were NOT saved."
+ "\n\n"));
+ }
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#define LKC_DIRECT_LINK
+#include "lkc.h"
+
+struct menu rootmenu;
+static struct menu **last_entry_ptr;
+
+struct file *file_list;
+struct file *current_file;
+
+static void menu_warn(struct menu *menu, const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ fprintf(stderr, "%s:%d:warning: ", menu->file->name, menu->lineno);
+ vfprintf(stderr, fmt, ap);
+ fprintf(stderr, "\n");
+ va_end(ap);
+}
+
+static void prop_warn(struct property *prop, const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ fprintf(stderr, "%s:%d:warning: ", prop->file->name, prop->lineno);
+ vfprintf(stderr, fmt, ap);
+ fprintf(stderr, "\n");
+ va_end(ap);
+}
+
+void menu_init(void)
+{
+ current_entry = current_menu = &rootmenu;
+ last_entry_ptr = &rootmenu.list;
+}
+
+void menu_add_entry(struct symbol *sym)
+{
+ struct menu *menu;
+
+ menu = malloc(sizeof(*menu));
+ memset(menu, 0, sizeof(*menu));
+ menu->sym = sym;
+ menu->parent = current_menu;
+ menu->file = current_file;
+ menu->lineno = zconf_lineno();
+
+ *last_entry_ptr = menu;
+ last_entry_ptr = &menu->next;
+ current_entry = menu;
+}
+
+void menu_end_entry(void)
+{
+}
+
+struct menu *menu_add_menu(void)
+{
+ menu_end_entry();
+ last_entry_ptr = &current_entry->list;
+ return current_menu = current_entry;
+}
+
+void menu_end_menu(void)
+{
+ last_entry_ptr = &current_menu->next;
+ current_menu = current_menu->parent;
+}
+
+struct expr *menu_check_dep(struct expr *e)
+{
+ if (!e)
+ return e;
+
+ switch (e->type) {
+ case E_NOT:
+ e->left.expr = menu_check_dep(e->left.expr);
+ break;
+ case E_OR:
+ case E_AND:
+ e->left.expr = menu_check_dep(e->left.expr);
+ e->right.expr = menu_check_dep(e->right.expr);
+ break;
+ case E_SYMBOL:
+ /* change 'm' into 'm' && MODULES */
+ if (e->left.sym == &symbol_mod)
+ return expr_alloc_and(e, expr_alloc_symbol(modules_sym));
+ break;
+ default:
+ break;
+ }
+ return e;
+}
+
+void menu_add_dep(struct expr *dep)
+{
+ current_entry->dep = expr_alloc_and(current_entry->dep, menu_check_dep(dep));
+}
+
+void menu_set_type(int type)
+{
+ struct symbol *sym = current_entry->sym;
+
+ if (sym->type == type)
+ return;
+ if (sym->type == S_UNKNOWN) {
+ sym->type = type;
+ return;
+ }
+ menu_warn(current_entry, "type of '%s' redefined from '%s' to '%s'\n",
+ sym->name ? sym->name : "<choice>",
+ sym_type_name(sym->type), sym_type_name(type));
+}
+
+struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *expr, struct expr *dep)
+{
+ struct property *prop = prop_alloc(type, current_entry->sym);
+
+ prop->menu = current_entry;
+ prop->text = prompt;
+ prop->expr = expr;
+ prop->visible.expr = menu_check_dep(dep);
+
+ if (prompt) {
+ if (current_entry->prompt)
+ menu_warn(current_entry, "prompt redefined\n");
+ current_entry->prompt = prop;
+ }
+
+ return prop;
+}
+
+struct property *menu_add_prompt(enum prop_type type, char *prompt, struct expr *dep)
+{
+ return menu_add_prop(type, prompt, NULL, dep);
+}
+
+void menu_add_expr(enum prop_type type, struct expr *expr, struct expr *dep)
+{
+ menu_add_prop(type, NULL, expr, dep);
+}
+
+void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep)
+{
+ menu_add_prop(type, NULL, expr_alloc_symbol(sym), dep);
+}
+
+static int menu_range_valid_sym(struct symbol *sym, struct symbol *sym2)
+{
+ return sym2->type == S_INT || sym2->type == S_HEX ||
+ (sym2->type == S_UNKNOWN && sym_string_valid(sym, sym2->name));
+}
+
+void sym_check_prop(struct symbol *sym)
+{
+ struct property *prop;
+ struct symbol *sym2;
+ for (prop = sym->prop; prop; prop = prop->next) {
+ switch (prop->type) {
+ case P_DEFAULT:
+ if ((sym->type == S_STRING || sym->type == S_INT || sym->type == S_HEX) &&
+ prop->expr->type != E_SYMBOL)
+ prop_warn(prop,
+ "default for config symbol '%'"
+ " must be a single symbol", sym->name);
+ break;
+ case P_SELECT:
+ sym2 = prop_get_symbol(prop);
+ if (sym->type != S_BOOLEAN && sym->type != S_TRISTATE)
+ prop_warn(prop,
+ "config symbol '%s' uses select, but is "
+ "not boolean or tristate", sym->name);
+ else if (sym2->type == S_UNKNOWN)
+ prop_warn(prop,
+ "'select' used by config symbol '%s' "
+ "refer to undefined symbol '%s'",
+ sym->name, sym2->name);
+ else if (sym2->type != S_BOOLEAN && sym2->type != S_TRISTATE)
+ prop_warn(prop,
+ "'%s' has wrong type. 'select' only "
+ "accept arguments of boolean and "
+ "tristate type", sym2->name);
+ break;
+ case P_RANGE:
+ if (sym->type != S_INT && sym->type != S_HEX)
+ prop_warn(prop, "range is only allowed "
+ "for int or hex symbols");
+ if (!menu_range_valid_sym(sym, prop->expr->left.sym) ||
+ !menu_range_valid_sym(sym, prop->expr->right.sym))
+ prop_warn(prop, "range is invalid");
+ break;
+ default:
+ ;
+ }
+ }
+}
+
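+/*
+ * Walk the menu tree bottom-up: propagate parent dependencies into each
+ * entry, record reverse dependencies for 'select', attach choice values to
+ * their choice symbol, fold prompt-less submenus into their parent, and
+ * warn about untyped symbols and malformed choices.
+ */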
+void menu_finalize(struct menu *parent)
+{
+ struct menu *menu, *last_menu;
+ struct symbol *sym;
+ struct property *prop;
+ struct expr *parentdep, *basedep, *dep, *dep2, **ep;
+
+ sym = parent->sym;
+ if (parent->list) {
+ if (sym && sym_is_choice(sym)) {
+ /* find the first choice value and find out choice type */
+ for (menu = parent->list; menu; menu = menu->next) {
+ if (menu->sym) {
+ current_entry = parent;
+ menu_set_type(menu->sym->type);
+ current_entry = menu;
+ menu_set_type(sym->type);
+ break;
+ }
+ }
+ parentdep = expr_alloc_symbol(sym);
+ } else if (parent->prompt)
+ parentdep = parent->prompt->visible.expr;
+ else
+ parentdep = parent->dep;
+
+ for (menu = parent->list; menu; menu = menu->next) {
+ basedep = expr_transform(menu->dep);
+ basedep = expr_alloc_and(expr_copy(parentdep), basedep);
+ basedep = expr_eliminate_dups(basedep);
+ menu->dep = basedep;
+ if (menu->sym)
+ prop = menu->sym->prop;
+ else
+ prop = menu->prompt;
+ for (; prop; prop = prop->next) {
+ if (prop->menu != menu)
+ continue;
+ dep = expr_transform(prop->visible.expr);
+ dep = expr_alloc_and(expr_copy(basedep), dep);
+ dep = expr_eliminate_dups(dep);
+ if (menu->sym && menu->sym->type != S_TRISTATE)
+ dep = expr_trans_bool(dep);
+ prop->visible.expr = dep;
+ if (prop->type == P_SELECT) {
+ struct symbol *es = prop_get_symbol(prop);
+ es->rev_dep.expr = expr_alloc_or(es->rev_dep.expr,
+ expr_alloc_and(expr_alloc_symbol(menu->sym), expr_copy(dep)));
+ }
+ }
+ }
+ for (menu = parent->list; menu; menu = menu->next)
+ menu_finalize(menu);
+ } else if (sym) {
+ basedep = parent->prompt ? parent->prompt->visible.expr : NULL;
+ basedep = expr_trans_compare(basedep, E_UNEQUAL, &symbol_no);
+ basedep = expr_eliminate_dups(expr_transform(basedep));
+ last_menu = NULL;
+ for (menu = parent->next; menu; menu = menu->next) {
+ dep = menu->prompt ? menu->prompt->visible.expr : menu->dep;
+ if (!expr_contains_symbol(dep, sym))
+ break;
+ if (expr_depends_symbol(dep, sym))
+ goto next;
+ dep = expr_trans_compare(dep, E_UNEQUAL, &symbol_no);
+ dep = expr_eliminate_dups(expr_transform(dep));
+ dep2 = expr_copy(basedep);
+ expr_eliminate_eq(&dep, &dep2);
+ expr_free(dep);
+ if (!expr_is_yes(dep2)) {
+ expr_free(dep2);
+ break;
+ }
+ expr_free(dep2);
+ next:
+ menu_finalize(menu);
+ menu->parent = parent;
+ last_menu = menu;
+ }
+ if (last_menu) {
+ parent->list = parent->next;
+ parent->next = last_menu->next;
+ last_menu->next = NULL;
+ }
+ }
+ for (menu = parent->list; menu; menu = menu->next) {
+ if (sym && sym_is_choice(sym) && menu->sym) {
+ menu->sym->flags |= SYMBOL_CHOICEVAL;
+ if (!menu->prompt)
+ menu_warn(menu, "choice value must have a prompt");
+ for (prop = menu->sym->prop; prop; prop = prop->next) {
+ if (prop->type == P_PROMPT && prop->menu != menu) {
+ prop_warn(prop, "choice values "
+ "currently only support a "
+ "single prompt");
+ }
+ if (prop->type == P_DEFAULT)
+ prop_warn(prop, "defaults for choice "
+ "values not supported");
+ }
+ current_entry = menu;
+ menu_set_type(sym->type);
+ menu_add_symbol(P_CHOICE, sym, NULL);
+ prop = sym_get_choice_prop(sym);
+ for (ep = &prop->expr; *ep; ep = &(*ep)->left.expr)
+ ;
+ *ep = expr_alloc_one(E_CHOICE, NULL);
+ (*ep)->right.sym = menu->sym;
+ }
+ if (menu->list && (!menu->prompt || !menu->prompt->text)) {
+ for (last_menu = menu->list; ; last_menu = last_menu->next) {
+ last_menu->parent = parent;
+ if (!last_menu->next)
+ break;
+ }
+ last_menu->next = menu->next;
+ menu->next = menu->list;
+ menu->list = NULL;
+ }
+ }
+
+ if (sym && !(sym->flags & SYMBOL_WARNED)) {
+ if (sym->type == S_UNKNOWN)
+ menu_warn(parent, "config symbol defined "
+ "without type\n");
+
+ if (sym_is_choice(sym) && !parent->prompt)
+ menu_warn(parent, "choice must have a prompt\n");
+
+ /* Check properties connected to this symbol */
+ sym_check_prop(sym);
+ sym->flags |= SYMBOL_WARNED;
+ }
+
+ if (sym && !sym_is_optional(sym) && parent->prompt) {
+ sym->rev_dep.expr = expr_alloc_or(sym->rev_dep.expr,
+ expr_alloc_and(parent->prompt->visible.expr,
+ expr_alloc_symbol(&symbol_mod)));
+ }
+}
+
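+/*
+ * A menu entry is visible if its prompt evaluates to something other than
+ * 'no'; failing that, an entry whose symbol is not 'n' is still shown when
+ * any of its children is visible.
+ */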
+bool menu_is_visible(struct menu *menu)
+{
+ struct menu *child;
+ struct symbol *sym;
+ tristate visible;
+
+ if (!menu->prompt)
+ return false;
+ sym = menu->sym;
+ if (sym) {
+ sym_calc_value(sym);
+ visible = menu->prompt->visible.tri;
+ } else
+ visible = menu->prompt->visible.tri = expr_calc_value(menu->prompt->visible.expr);
+
+ if (visible != no)
+ return true;
+ if (!sym || sym_get_tristate_value(menu->sym) == no)
+ return false;
+
+ for (child = menu->list; child; child = child->next)
+ if (menu_is_visible(child))
+ return true;
+ return false;
+}
+
+const char *menu_get_prompt(struct menu *menu)
+{
+ if (menu->prompt)
+ return _(menu->prompt->text);
+ else if (menu->sym)
+ return _(menu->sym->name);
+ return NULL;
+}
+
+struct menu *menu_get_root_menu(struct menu *menu)
+{
+ return &rootmenu;
+}
+
+struct menu *menu_get_parent_menu(struct menu *menu)
+{
+ enum prop_type type;
+
+ for (; menu != &rootmenu; menu = menu->parent) {
+ type = menu->prompt ? menu->prompt->type : 0;
+ if (type == P_MENU)
+ break;
+ }
+ return menu;
+}
+
--- /dev/null
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#include <qapplication.h>
+#include <qmainwindow.h>
+#include <qtoolbar.h>
+#include <qvbox.h>
+#include <qsplitter.h>
+#include <qlistview.h>
+#include <qtextview.h>
+#include <qlineedit.h>
+#include <qmenubar.h>
+#include <qmessagebox.h>
+#include <qaction.h>
+#include <qheader.h>
+#include <qfiledialog.h>
+#include <qregexp.h>
+
+#include <stdlib.h>
+
+#include "lkc.h"
+#include "qconf.h"
+
+#include "qconf.moc"
+#include "images.c"
+
+#ifdef _
+# undef _
+# define _ qgettext
+#endif
+
+static QApplication *configApp;
+
+static inline QString qgettext(const char* str)
+{
+ return QString::fromLocal8Bit(gettext(str));
+}
+
+static inline QString qgettext(const QString& str)
+{
+ return QString::fromLocal8Bit(gettext(str.latin1()));
+}
+
+ConfigSettings::ConfigSettings()
+ : showAll(false), showName(false), showRange(false), showData(false)
+{
+}
+
+#if QT_VERSION >= 300
+/**
+ * Reads the list column settings from the application settings.
+ */
+void ConfigSettings::readListSettings()
+{
+ showAll = readBoolEntry("/kconfig/qconf/showAll", false);
+ showName = readBoolEntry("/kconfig/qconf/showName", false);
+ showRange = readBoolEntry("/kconfig/qconf/showRange", false);
+ showData = readBoolEntry("/kconfig/qconf/showData", false);
+}
+
+/**
+ * Reads a list of integer values from the application settings.
+ */
+QValueList<int> ConfigSettings::readSizes(const QString& key, bool *ok)
+{
+ QValueList<int> result;
+ QStringList entryList = readListEntry(key, ok);
+ if (ok) {
+ QStringList::Iterator it;
+ for (it = entryList.begin(); it != entryList.end(); ++it)
+ result.push_back((*it).toInt());
+ }
+
+ return result;
+}
+
+/**
+ * Writes a list of integer values to the application settings.
+ */
+bool ConfigSettings::writeSizes(const QString& key, const QValueList<int>& value)
+{
+ QStringList stringList;
+ QValueList<int>::ConstIterator it;
+
+ for (it = value.begin(); it != value.end(); ++it)
+ stringList.push_back(QString::number(*it));
+ return writeEntry(key, stringList);
+}
+#endif
+
+
+/*
+ * update all the children of a menu entry
+ * removes/adds the entries from the parent widget as necessary
+ *
+ * parent: either the menu list widget or a menu entry widget
+ * menu: entry to be updated
+ */
+template <class P>
+void ConfigList::updateMenuList(P* parent, struct menu* menu)
+{
+ struct menu* child;
+ ConfigItem* item;
+ ConfigItem* last;
+ bool visible;
+ enum prop_type type;
+
+ if (!menu) {
+ while ((item = parent->firstChild()))
+ delete item;
+ return;
+ }
+
+ last = parent->firstChild();
+ if (last && !last->goParent)
+ last = 0;
+ for (child = menu->list; child; child = child->next) {
+ item = last ? last->nextSibling() : parent->firstChild();
+ type = child->prompt ? child->prompt->type : P_UNKNOWN;
+
+ switch (mode) {
+ case menuMode:
+ if (!(child->flags & MENU_ROOT))
+ goto hide;
+ break;
+ case symbolMode:
+ if (child->flags & MENU_ROOT)
+ goto hide;
+ break;
+ default:
+ break;
+ }
+
+ visible = menu_is_visible(child);
+ if (showAll || visible) {
+ if (!item || item->menu != child)
+ item = new ConfigItem(parent, last, child, visible);
+ else
+ item->testUpdateMenu(visible);
+
+ if (mode == fullMode || mode == menuMode || type != P_MENU)
+ updateMenuList(item, child);
+ else
+ updateMenuList(item, 0);
+ last = item;
+ continue;
+ }
+ hide:
+ if (item && item->menu == child) {
+ last = parent->firstChild();
+ if (last == item)
+ last = 0;
+ else while (last->nextSibling() != item)
+ last = last->nextSibling();
+ delete item;
+ }
+ }
+}
+
+#if QT_VERSION >= 300
+/*
+ * set the new data
+ * TODO check the value
+ */
+void ConfigItem::okRename(int col)
+{
+ Parent::okRename(col);
+ sym_set_string_value(menu->sym, text(dataColIdx).latin1());
+}
+#endif
+
+/*
+ * update the display of a menu entry
+ */
+void ConfigItem::updateMenu(void)
+{
+ ConfigList* list;
+ struct symbol* sym;
+ struct property *prop;
+ QString prompt;
+ int type;
+ tristate expr;
+
+ list = listView();
+ if (goParent) {
+ setPixmap(promptColIdx, list->menuBackPix);
+ prompt = "..";
+ goto set_prompt;
+ }
+
+ sym = menu->sym;
+ prop = menu->prompt;
+ prompt = QString::fromLocal8Bit(menu_get_prompt(menu));
+
+ if (prop) switch (prop->type) {
+ case P_MENU:
+ if (list->mode == singleMode || list->mode == symbolMode) {
+ /* a menuconfig entry is displayed differently
+ * depending on whether it's at the view root or a child.
+ */
+ if (sym && list->rootEntry == menu)
+ break;
+ setPixmap(promptColIdx, list->menuPix);
+ } else {
+ if (sym)
+ break;
+ setPixmap(promptColIdx, 0);
+ }
+ goto set_prompt;
+ case P_COMMENT:
+ setPixmap(promptColIdx, 0);
+ goto set_prompt;
+ default:
+ ;
+ }
+ if (!sym)
+ goto set_prompt;
+
+ setText(nameColIdx, QString::fromLocal8Bit(sym->name));
+
+ type = sym_get_type(sym);
+ switch (type) {
+ case S_BOOLEAN:
+ case S_TRISTATE:
+ char ch;
+
+ if (!sym_is_changable(sym) && !list->showAll) {
+ setPixmap(promptColIdx, 0);
+ setText(noColIdx, QString::null);
+ setText(modColIdx, QString::null);
+ setText(yesColIdx, QString::null);
+ break;
+ }
+ expr = sym_get_tristate_value(sym);
+ switch (expr) {
+ case yes:
+ if (sym_is_choice_value(sym) && type == S_BOOLEAN)
+ setPixmap(promptColIdx, list->choiceYesPix);
+ else
+ setPixmap(promptColIdx, list->symbolYesPix);
+ setText(yesColIdx, "Y");
+ ch = 'Y';
+ break;
+ case mod:
+ setPixmap(promptColIdx, list->symbolModPix);
+ setText(modColIdx, "M");
+ ch = 'M';
+ break;
+ default:
+ if (sym_is_choice_value(sym) && type == S_BOOLEAN)
+ setPixmap(promptColIdx, list->choiceNoPix);
+ else
+ setPixmap(promptColIdx, list->symbolNoPix);
+ setText(noColIdx, "N");
+ ch = 'N';
+ break;
+ }
+ if (expr != no)
+ setText(noColIdx, sym_tristate_within_range(sym, no) ? "_" : 0);
+ if (expr != mod)
+ setText(modColIdx, sym_tristate_within_range(sym, mod) ? "_" : 0);
+ if (expr != yes)
+ setText(yesColIdx, sym_tristate_within_range(sym, yes) ? "_" : 0);
+
+ setText(dataColIdx, QChar(ch));
+ break;
+ case S_INT:
+ case S_HEX:
+ case S_STRING:
+ const char* data;
+
+ data = sym_get_string_value(sym);
+
+#if QT_VERSION >= 300
+ int i = list->mapIdx(dataColIdx);
+ if (i >= 0)
+ setRenameEnabled(i, TRUE);
+#endif
+ setText(dataColIdx, data);
+ if (type == S_STRING)
+ prompt = QString("%1: %2").arg(prompt).arg(data);
+ else
+ prompt = QString("(%2) %1").arg(prompt).arg(data);
+ break;
+ }
+ if (!sym_has_value(sym) && visible)
+ prompt += " (NEW)";
+set_prompt:
+ setText(promptColIdx, prompt);
+}
+
+void ConfigItem::testUpdateMenu(bool v)
+{
+ ConfigItem* i;
+
+ visible = v;
+ if (!menu)
+ return;
+
+ sym_calc_value(menu->sym);
+ if (menu->flags & MENU_CHANGED) {
+ /* the menu entry changed, so update all list items */
+ menu->flags &= ~MENU_CHANGED;
+ for (i = (ConfigItem*)menu->data; i; i = i->nextItem)
+ i->updateMenu();
+ } else if (listView()->updateAll)
+ updateMenu();
+}
+
+void ConfigItem::paintCell(QPainter* p, const QColorGroup& cg, int column, int width, int align)
+{
+ ConfigList* list = listView();
+
+ if (visible) {
+ if (isSelected() && !list->hasFocus() && list->mode == menuMode)
+ Parent::paintCell(p, list->inactivedColorGroup, column, width, align);
+ else
+ Parent::paintCell(p, cg, column, width, align);
+ } else
+ Parent::paintCell(p, list->disabledColorGroup, column, width, align);
+}
+
+/*
+ * construct a menu entry
+ */
+void ConfigItem::init(void)
+{
+ if (menu) {
+ ConfigList* list = listView();
+ nextItem = (ConfigItem*)menu->data;
+ menu->data = this;
+
+ if (list->mode != fullMode)
+ setOpen(TRUE);
+ sym_calc_value(menu->sym);
+ }
+ updateMenu();
+}
+
+/*
+ * destruct a menu entry
+ */
+ConfigItem::~ConfigItem(void)
+{
+ if (menu) {
+ ConfigItem** ip = (ConfigItem**)&menu->data;
+ for (; *ip; ip = &(*ip)->nextItem) {
+ if (*ip == this) {
+ *ip = nextItem;
+ break;
+ }
+ }
+ }
+}
+
+void ConfigLineEdit::show(ConfigItem* i)
+{
+ item = i;
+ if (sym_get_string_value(item->menu->sym))
+ setText(QString::fromLocal8Bit(sym_get_string_value(item->menu->sym)));
+ else
+ setText(QString::null);
+ Parent::show();
+ setFocus();
+}
+
+void ConfigLineEdit::keyPressEvent(QKeyEvent* e)
+{
+ switch (e->key()) {
+ case Key_Escape:
+ break;
+ case Key_Return:
+ case Key_Enter:
+ sym_set_string_value(item->menu->sym, text().latin1());
+ parent()->updateList(item);
+ break;
+ default:
+ Parent::keyPressEvent(e);
+ return;
+ }
+ e->accept();
+ parent()->list->setFocus();
+ hide();
+}
+
+ConfigList::ConfigList(ConfigView* p, ConfigMainWindow* cv, ConfigSettings* configSettings)
+ : Parent(p), cview(cv),
+ updateAll(false),
+ symbolYesPix(xpm_symbol_yes), symbolModPix(xpm_symbol_mod), symbolNoPix(xpm_symbol_no),
+ choiceYesPix(xpm_choice_yes), choiceNoPix(xpm_choice_no),
+ menuPix(xpm_menu), menuInvPix(xpm_menu_inv), menuBackPix(xpm_menuback), voidPix(xpm_void),
+ showAll(false), showName(false), showRange(false), showData(false),
+ rootEntry(0)
+{
+ int i;
+
+ setSorting(-1);
+ setRootIsDecorated(TRUE);
+ disabledColorGroup = palette().active();
+ disabledColorGroup.setColor(QColorGroup::Text, palette().disabled().text());
+ inactivedColorGroup = palette().active();
+ inactivedColorGroup.setColor(QColorGroup::Highlight, palette().disabled().highlight());
+
+ connect(this, SIGNAL(selectionChanged(void)),
+ SLOT(updateSelection(void)));
+
+ if (configSettings) {
+ showAll = configSettings->showAll;
+ showName = configSettings->showName;
+ showRange = configSettings->showRange;
+ showData = configSettings->showData;
+ }
+
+ for (i = 0; i < colNr; i++)
+ colMap[i] = colRevMap[i] = -1;
+ addColumn(promptColIdx, "Option");
+
+ reinit();
+}
+
+void ConfigList::reinit(void)
+{
+ removeColumn(dataColIdx);
+ removeColumn(yesColIdx);
+ removeColumn(modColIdx);
+ removeColumn(noColIdx);
+ removeColumn(nameColIdx);
+
+ if (showName)
+ addColumn(nameColIdx, "Name");
+ if (showRange) {
+ addColumn(noColIdx, "N");
+ addColumn(modColIdx, "M");
+ addColumn(yesColIdx, "Y");
+ }
+ if (showData)
+ addColumn(dataColIdx, "Value");
+
+ updateListAll();
+}
+
+void ConfigList::updateSelection(void)
+{
+ struct menu *menu;
+ enum prop_type type;
+
+ ConfigItem* item = (ConfigItem*)selectedItem();
+ if (!item)
+ return;
+
+ cview->setHelp(item);
+
+ menu = item->menu;
+ if (!menu)
+ return;
+ type = menu->prompt ? menu->prompt->type : P_UNKNOWN;
+ if (mode == menuMode && type == P_MENU)
+ emit menuSelected(menu);
+}
+
+void ConfigList::updateList(ConfigItem* item)
+{
+ ConfigItem* last = 0;
+
+ if (!rootEntry)
+ goto update;
+
+ if (rootEntry != &rootmenu && (mode == singleMode ||
+ (mode == symbolMode && rootEntry->parent != &rootmenu))) {
+ item = firstChild();
+ if (!item)
+ item = new ConfigItem(this, 0, true);
+ last = item;
+ }
+ if ((mode == singleMode || (mode == symbolMode && !(rootEntry->flags & MENU_ROOT))) &&
+ rootEntry->sym && rootEntry->prompt) {
+ item = last ? last->nextSibling() : firstChild();
+ if (!item)
+ item = new ConfigItem(this, last, rootEntry, true);
+ else
+ item->testUpdateMenu(true);
+
+ updateMenuList(item, rootEntry);
+ triggerUpdate();
+ return;
+ }
+update:
+ updateMenuList(this, rootEntry);
+ triggerUpdate();
+}
+
+void ConfigList::setAllOpen(bool open)
+{
+ QListViewItemIterator it(this);
+
+ for (; it.current(); it++)
+ it.current()->setOpen(open);
+}
+
+void ConfigList::setValue(ConfigItem* item, tristate val)
+{
+ struct symbol* sym;
+ int type;
+ tristate oldval;
+
+ sym = item->menu ? item->menu->sym : 0;
+ if (!sym)
+ return;
+
+ type = sym_get_type(sym);
+ switch (type) {
+ case S_BOOLEAN:
+ case S_TRISTATE:
+ oldval = sym_get_tristate_value(sym);
+
+ if (!sym_set_tristate_value(sym, val))
+ return;
+ if (oldval == no && item->menu->list)
+ item->setOpen(TRUE);
+ parent()->updateList(item);
+ break;
+ }
+}
+
+void ConfigList::changeValue(ConfigItem* item)
+{
+ struct symbol* sym;
+ struct menu* menu;
+ int type, oldexpr, newexpr;
+
+ menu = item->menu;
+ if (!menu)
+ return;
+ sym = menu->sym;
+ if (!sym) {
+ if (item->menu->list)
+ item->setOpen(!item->isOpen());
+ return;
+ }
+
+ type = sym_get_type(sym);
+ switch (type) {
+ case S_BOOLEAN:
+ case S_TRISTATE:
+ oldexpr = sym_get_tristate_value(sym);
+ newexpr = sym_toggle_tristate_value(sym);
+ if (item->menu->list) {
+ if (oldexpr == newexpr)
+ item->setOpen(!item->isOpen());
+ else if (oldexpr == no)
+ item->setOpen(TRUE);
+ }
+ if (oldexpr != newexpr)
+ parent()->updateList(item);
+ break;
+ case S_INT:
+ case S_HEX:
+ case S_STRING:
+#if QT_VERSION >= 300
+ if (colMap[dataColIdx] >= 0)
+ item->startRename(colMap[dataColIdx]);
+ else
+#endif
+ parent()->lineEdit->show(item);
+ break;
+ }
+}
+
+void ConfigList::setRootMenu(struct menu *menu)
+{
+ enum prop_type type;
+
+ if (rootEntry == menu)
+ return;
+ type = menu && menu->prompt ? menu->prompt->type : P_UNKNOWN;
+ if (type != P_MENU)
+ return;
+ updateMenuList(this, 0);
+ rootEntry = menu;
+ updateListAll();
+ setSelected(currentItem(), hasFocus());
+}
+
+void ConfigList::setParentMenu(void)
+{
+ ConfigItem* item;
+ struct menu *oldroot;
+
+ oldroot = rootEntry;
+ if (rootEntry == &rootmenu)
+ return;
+ setRootMenu(menu_get_parent_menu(rootEntry->parent));
+
+ QListViewItemIterator it(this);
+ for (; (item = (ConfigItem*)it.current()); it++) {
+ if (item->menu == oldroot) {
+ setCurrentItem(item);
+ ensureItemVisible(item);
+ break;
+ }
+ }
+}
+
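+/*
+ * Keyboard handling: Escape goes back to the parent menu, Return/Enter
+ * either descends into a submenu or toggles the value, Space toggles, and
+ * N/M/Y set the value directly.
+ */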
+void ConfigList::keyPressEvent(QKeyEvent* ev)
+{
+ QListViewItem* i = currentItem();
+ ConfigItem* item;
+ struct menu *menu;
+ enum prop_type type;
+
+ if (ev->key() == Key_Escape && mode != fullMode) {
+ emit parentSelected();
+ ev->accept();
+ return;
+ }
+
+ if (!i) {
+ Parent::keyPressEvent(ev);
+ return;
+ }
+ item = (ConfigItem*)i;
+
+ switch (ev->key()) {
+ case Key_Return:
+ case Key_Enter:
+ if (item->goParent) {
+ emit parentSelected();
+ break;
+ }
+ menu = item->menu;
+ if (!menu)
+ break;
+ type = menu->prompt ? menu->prompt->type : P_UNKNOWN;
+ if (type == P_MENU && rootEntry != menu &&
+ mode != fullMode && mode != menuMode) {
+ emit menuSelected(menu);
+ break;
+ }
+ case Key_Space:
+ changeValue(item);
+ break;
+ case Key_N:
+ setValue(item, no);
+ break;
+ case Key_M:
+ setValue(item, mod);
+ break;
+ case Key_Y:
+ setValue(item, yes);
+ break;
+ default:
+ Parent::keyPressEvent(ev);
+ return;
+ }
+ ev->accept();
+}
+
+void ConfigList::contentsMousePressEvent(QMouseEvent* e)
+{
+ //QPoint p(contentsToViewport(e->pos()));
+ //printf("contentsMousePressEvent: %d,%d\n", p.x(), p.y());
+ Parent::contentsMousePressEvent(e);
+}
+
+void ConfigList::contentsMouseReleaseEvent(QMouseEvent* e)
+{
+ QPoint p(contentsToViewport(e->pos()));
+ ConfigItem* item = (ConfigItem*)itemAt(p);
+ struct menu *menu;
+ enum prop_type ptype;
+ const QPixmap* pm;
+ int idx, x;
+
+ if (!item)
+ goto skip;
+
+ menu = item->menu;
+ x = header()->offset() + p.x();
+ idx = colRevMap[header()->sectionAt(x)];
+ switch (idx) {
+ case promptColIdx:
+ pm = item->pixmap(promptColIdx);
+ if (pm) {
+ int off = header()->sectionPos(0) + itemMargin() +
+ treeStepSize() * (item->depth() + (rootIsDecorated() ? 1 : 0));
+ if (x >= off && x < off + pm->width()) {
+ if (item->goParent) {
+ emit parentSelected();
+ break;
+ } else if (!menu)
+ break;
+ ptype = menu->prompt ? menu->prompt->type : P_UNKNOWN;
+ if (ptype == P_MENU && rootEntry != menu &&
+ mode != fullMode && mode != menuMode)
+ emit menuSelected(menu);
+ else
+ changeValue(item);
+ }
+ }
+ break;
+ case noColIdx:
+ setValue(item, no);
+ break;
+ case modColIdx:
+ setValue(item, mod);
+ break;
+ case yesColIdx:
+ setValue(item, yes);
+ break;
+ case dataColIdx:
+ changeValue(item);
+ break;
+ }
+
+skip:
+ //printf("contentsMouseReleaseEvent: %d,%d\n", p.x(), p.y());
+ Parent::contentsMouseReleaseEvent(e);
+}
+
+void ConfigList::contentsMouseMoveEvent(QMouseEvent* e)
+{
+ //QPoint p(contentsToViewport(e->pos()));
+ //printf("contentsMouseMoveEvent: %d,%d\n", p.x(), p.y());
+ Parent::contentsMouseMoveEvent(e);
+}
+
+void ConfigList::contentsMouseDoubleClickEvent(QMouseEvent* e)
+{
+ QPoint p(contentsToViewport(e->pos()));
+ ConfigItem* item = (ConfigItem*)itemAt(p);
+ struct menu *menu;
+ enum prop_type ptype;
+
+ if (!item)
+ goto skip;
+ if (item->goParent) {
+ emit parentSelected();
+ goto skip;
+ }
+ menu = item->menu;
+ if (!menu)
+ goto skip;
+ ptype = menu->prompt ? menu->prompt->type : P_UNKNOWN;
+ if (ptype == P_MENU && (mode == singleMode || mode == symbolMode))
+ emit menuSelected(menu);
+ else if (menu->sym)
+ changeValue(item);
+
+skip:
+ //printf("contentsMouseDoubleClickEvent: %d,%d\n", p.x(), p.y());
+ Parent::contentsMouseDoubleClickEvent(e);
+}
+
+void ConfigList::focusInEvent(QFocusEvent *e)
+{
+ Parent::focusInEvent(e);
+
+ QListViewItem* item = currentItem();
+ if (!item)
+ return;
+
+ setSelected(item, TRUE);
+ emit gotFocus();
+}
+
+ConfigView* ConfigView::viewList;
+
+ConfigView::ConfigView(QWidget* parent, ConfigMainWindow* cview,
+ ConfigSettings *configSettings)
+ : Parent(parent)
+{
+ list = new ConfigList(this, cview, configSettings);
+ lineEdit = new ConfigLineEdit(this);
+ lineEdit->hide();
+
+ this->nextView = viewList;
+ viewList = this;
+}
+
+ConfigView::~ConfigView(void)
+{
+ ConfigView** vp;
+
+ for (vp = &viewList; *vp; vp = &(*vp)->nextView) {
+ if (*vp == this) {
+ *vp = nextView;
+ break;
+ }
+ }
+}
+
+void ConfigView::updateList(ConfigItem* item)
+{
+ ConfigView* v;
+
+ for (v = viewList; v; v = v->nextView)
+ v->list->updateList(item);
+}
+
+void ConfigView::updateListAll(void)
+{
+ ConfigView* v;
+
+ for (v = viewList; v; v = v->nextView)
+ v->list->updateListAll();
+}
+
+/*
+ * Construct the complete config widget
+ */
+ConfigMainWindow::ConfigMainWindow(void)
+{
+ QMenuBar* menu;
+ bool ok;
+ int x, y, width, height;
+
+ QWidget *d = configApp->desktop();
+
+ ConfigSettings* configSettings = new ConfigSettings();
+#if QT_VERSION >= 300
+ width = configSettings->readNumEntry("/kconfig/qconf/window width", d->width() - 64);
+ height = configSettings->readNumEntry("/kconfig/qconf/window height", d->height() - 64);
+ resize(width, height);
+ x = configSettings->readNumEntry("/kconfig/qconf/window x", 0, &ok);
+ if (ok)
+ y = configSettings->readNumEntry("/kconfig/qconf/window y", 0, &ok);
+ if (ok)
+ move(x, y);
+ showDebug = configSettings->readBoolEntry("/kconfig/qconf/showDebug", false);
+
+ // read list settings into configSettings, will be used later for ConfigList setup
+ configSettings->readListSettings();
+#else
+ width = d->width() - 64;
+ height = d->height() - 64;
+ resize(width, height);
+ showDebug = false;
+#endif
+
+ split1 = new QSplitter(this);
+ split1->setOrientation(QSplitter::Horizontal);
+ setCentralWidget(split1);
+
+ menuView = new ConfigView(split1, this, configSettings);
+ menuList = menuView->list;
+
+ split2 = new QSplitter(split1);
+ split2->setOrientation(QSplitter::Vertical);
+
+ // create config tree
+ configView = new ConfigView(split2, this, configSettings);
+ configList = configView->list;
+
+ helpText = new QTextView(split2);
+ helpText->setTextFormat(Qt::RichText);
+
+ setTabOrder(configList, helpText);
+ configList->setFocus();
+
+ menu = menuBar();
+ toolBar = new QToolBar("Tools", this);
+
+ backAction = new QAction("Back", QPixmap(xpm_back), "Back", 0, this);
+ connect(backAction, SIGNAL(activated()), SLOT(goBack()));
+ backAction->setEnabled(FALSE);
+ QAction *quitAction = new QAction("Quit", "&Quit", CTRL+Key_Q, this);
+ connect(quitAction, SIGNAL(activated()), SLOT(close()));
+ QAction *loadAction = new QAction("Load", QPixmap(xpm_load), "&Load", CTRL+Key_L, this);
+ connect(loadAction, SIGNAL(activated()), SLOT(loadConfig()));
+ QAction *saveAction = new QAction("Save", QPixmap(xpm_save), "&Save", CTRL+Key_S, this);
+ connect(saveAction, SIGNAL(activated()), SLOT(saveConfig()));
+ QAction *saveAsAction = new QAction("Save As...", "Save &As...", 0, this);
+ connect(saveAsAction, SIGNAL(activated()), SLOT(saveConfigAs()));
+ QAction *singleViewAction = new QAction("Single View", QPixmap(xpm_single_view), "Split View", 0, this);
+ connect(singleViewAction, SIGNAL(activated()), SLOT(showSingleView()));
+ QAction *splitViewAction = new QAction("Split View", QPixmap(xpm_split_view), "Split View", 0, this);
+ connect(splitViewAction, SIGNAL(activated()), SLOT(showSplitView()));
+ QAction *fullViewAction = new QAction("Full View", QPixmap(xpm_tree_view), "Full View", 0, this);
+ connect(fullViewAction, SIGNAL(activated()), SLOT(showFullView()));
+
+ QAction *showNameAction = new QAction(NULL, "Show Name", 0, this);
+ showNameAction->setToggleAction(TRUE);
+ showNameAction->setOn(configList->showName);
+ connect(showNameAction, SIGNAL(toggled(bool)), SLOT(setShowName(bool)));
+ QAction *showRangeAction = new QAction(NULL, "Show Range", 0, this);
+ showRangeAction->setToggleAction(TRUE);
+ showRangeAction->setOn(configList->showRange);
+ connect(showRangeAction, SIGNAL(toggled(bool)), SLOT(setShowRange(bool)));
+ QAction *showDataAction = new QAction(NULL, "Show Data", 0, this);
+ showDataAction->setToggleAction(TRUE);
+ showDataAction->setOn(configList->showData);
+ connect(showDataAction, SIGNAL(toggled(bool)), SLOT(setShowData(bool)));
+ QAction *showAllAction = new QAction(NULL, "Show All Options", 0, this);
+ showAllAction->setToggleAction(TRUE);
+ showAllAction->setOn(configList->showAll);
+ connect(showAllAction, SIGNAL(toggled(bool)), SLOT(setShowAll(bool)));
+ QAction *showDebugAction = new QAction(NULL, "Show Debug Info", 0, this);
+ showDebugAction->setToggleAction(TRUE);
+ showDebugAction->setOn(showDebug);
+ connect(showDebugAction, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool)));
+
+ QAction *showIntroAction = new QAction(NULL, "Introduction", 0, this);
+ connect(showIntroAction, SIGNAL(activated()), SLOT(showIntro()));
+ QAction *showAboutAction = new QAction(NULL, "About", 0, this);
+ connect(showAboutAction, SIGNAL(activated()), SLOT(showAbout()));
+
+ // init tool bar
+ backAction->addTo(toolBar);
+ toolBar->addSeparator();
+ loadAction->addTo(toolBar);
+ saveAction->addTo(toolBar);
+ toolBar->addSeparator();
+ singleViewAction->addTo(toolBar);
+ splitViewAction->addTo(toolBar);
+ fullViewAction->addTo(toolBar);
+
+ // create config menu
+ QPopupMenu* config = new QPopupMenu(this);
+ menu->insertItem("&File", config);
+ loadAction->addTo(config);
+ saveAction->addTo(config);
+ saveAsAction->addTo(config);
+ config->insertSeparator();
+ quitAction->addTo(config);
+
+ // create options menu
+ QPopupMenu* optionMenu = new QPopupMenu(this);
+ menu->insertItem("&Option", optionMenu);
+ showNameAction->addTo(optionMenu);
+ showRangeAction->addTo(optionMenu);
+ showDataAction->addTo(optionMenu);
+ optionMenu->insertSeparator();
+ showAllAction->addTo(optionMenu);
+ showDebugAction->addTo(optionMenu);
+
+ // create help menu
+ QPopupMenu* helpMenu = new QPopupMenu(this);
+ menu->insertSeparator();
+ menu->insertItem("&Help", helpMenu);
+ showIntroAction->addTo(helpMenu);
+ showAboutAction->addTo(helpMenu);
+
+ connect(configList, SIGNAL(menuSelected(struct menu *)),
+ SLOT(changeMenu(struct menu *)));
+ connect(configList, SIGNAL(parentSelected()),
+ SLOT(goBack()));
+ connect(menuList, SIGNAL(menuSelected(struct menu *)),
+ SLOT(changeMenu(struct menu *)));
+
+ connect(configList, SIGNAL(gotFocus(void)),
+ SLOT(listFocusChanged(void)));
+ connect(menuList, SIGNAL(gotFocus(void)),
+ SLOT(listFocusChanged(void)));
+
+#if QT_VERSION >= 300
+ QString listMode = configSettings->readEntry("/kconfig/qconf/listMode", "symbol");
+ if (listMode == "single")
+ showSingleView();
+ else if (listMode == "full")
+ showFullView();
+ else /*if (listMode == "split")*/
+ showSplitView();
+
+ // UI setup done, restore splitter positions
+ QValueList<int> sizes = configSettings->readSizes("/kconfig/qconf/split1", &ok);
+ if (ok)
+ split1->setSizes(sizes);
+
+ sizes = configSettings->readSizes("/kconfig/qconf/split2", &ok);
+ if (ok)
+ split2->setSizes(sizes);
+#else
+ showSplitView();
+#endif
+ delete configSettings;
+}
+
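+/*
+ * Escape characters that are special in the Qt rich-text help widget so
+ * that prompt and help strings are displayed literally.
+ */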
+static QString print_filter(const QString &str)
+{
+ QRegExp re("[<>&\"\\n]");
+ QString res = str;
+ for (int i = 0; (i = res.find(re, i)) >= 0;) {
+ switch (res[i].latin1()) {
+ case '<':
+ res.replace(i, 1, "<");
+ i += 4;
+ break;
+ case '>':
+ res.replace(i, 1, ">");
+ i += 4;
+ break;
+ case '&':
+ res.replace(i, 1, "&");
+ i += 5;
+ break;
+ case '"':
+ res.replace(i, 1, """);
+ i += 6;
+ break;
+ case '\n':
+ res.replace(i, 1, "<br>");
+ i += 4;
+ break;
+ }
+ }
+ return res;
+}
+
+static void expr_print_help(void *data, const char *str)
+{
+ reinterpret_cast<QString*>(data)->append(print_filter(str));
+}
+
+/*
+ * display a new help entry as soon as a new menu entry is selected
+ */
+void ConfigMainWindow::setHelp(QListViewItem* item)
+{
+ struct symbol* sym;
+ struct menu* menu = 0;
+
+ configList->parent()->lineEdit->hide();
+ if (item)
+ menu = ((ConfigItem*)item)->menu;
+ if (!menu) {
+ helpText->setText(QString::null);
+ return;
+ }
+
+ QString head, debug, help;
+ menu = ((ConfigItem*)item)->menu;
+ sym = menu->sym;
+ if (sym) {
+ if (menu->prompt) {
+ head += "<big><b>";
+ head += print_filter(_(menu->prompt->text));
+ head += "</b></big>";
+ if (sym->name) {
+ head += " (";
+ head += print_filter(_(sym->name));
+ head += ")";
+ }
+ } else if (sym->name) {
+ head += "<big><b>";
+ head += print_filter(_(sym->name));
+ head += "</b></big>";
+ }
+ head += "<br><br>";
+
+ if (showDebug) {
+ debug += "type: ";
+ debug += print_filter(sym_type_name(sym->type));
+ if (sym_is_choice(sym))
+ debug += " (choice)";
+ debug += "<br>";
+ if (sym->rev_dep.expr) {
+ debug += "reverse dep: ";
+ expr_print(sym->rev_dep.expr, expr_print_help, &debug, E_NONE);
+ debug += "<br>";
+ }
+ for (struct property *prop = sym->prop; prop; prop = prop->next) {
+ switch (prop->type) {
+ case P_PROMPT:
+ case P_MENU:
+ debug += "prompt: ";
+ debug += print_filter(_(prop->text));
+ debug += "<br>";
+ break;
+ case P_DEFAULT:
+ debug += "default: ";
+ expr_print(prop->expr, expr_print_help, &debug, E_NONE);
+ debug += "<br>";
+ break;
+ case P_CHOICE:
+ if (sym_is_choice(sym)) {
+ debug += "choice: ";
+ expr_print(prop->expr, expr_print_help, &debug, E_NONE);
+ debug += "<br>";
+ }
+ break;
+ case P_SELECT:
+ debug += "select: ";
+ expr_print(prop->expr, expr_print_help, &debug, E_NONE);
+ debug += "<br>";
+ break;
+ case P_RANGE:
+ debug += "range: ";
+ expr_print(prop->expr, expr_print_help, &debug, E_NONE);
+ debug += "<br>";
+ break;
+ default:
+ debug += "unknown property: ";
+ debug += prop_get_type_name(prop->type);
+ debug += "<br>";
+ }
+ if (prop->visible.expr) {
+ debug += " dep: ";
+ expr_print(prop->visible.expr, expr_print_help, &debug, E_NONE);
+ debug += "<br>";
+ }
+ }
+ debug += "<br>";
+ }
+
+ help = print_filter(_(sym->help));
+ } else if (menu->prompt) {
+ head += "<big><b>";
+ head += print_filter(_(menu->prompt->text));
+ head += "</b></big><br><br>";
+ if (showDebug) {
+ if (menu->prompt->visible.expr) {
+ debug += " dep: ";
+ expr_print(menu->prompt->visible.expr, expr_print_help, &debug, E_NONE);
+ debug += "<br><br>";
+ }
+ }
+ }
+ if (showDebug)
+ debug += QString().sprintf("defined at %s:%d<br><br>", menu->file->name, menu->lineno);
+ helpText->setText(head + debug + help);
+}
+
+void ConfigMainWindow::loadConfig(void)
+{
+ QString s = QFileDialog::getOpenFileName(".config", NULL, this);
+ if (s.isNull())
+ return;
+ if (conf_read(QFile::encodeName(s)))
+ QMessageBox::information(this, "qconf", "Unable to load configuration!");
+ ConfigView::updateListAll();
+}
+
+void ConfigMainWindow::saveConfig(void)
+{
+ if (conf_write(NULL))
+ QMessageBox::information(this, "qconf", "Unable to save configuration!");
+}
+
+void ConfigMainWindow::saveConfigAs(void)
+{
+ QString s = QFileDialog::getSaveFileName(".config", NULL, this);
+ if (s.isNull())
+ return;
+ if (conf_write(QFile::encodeName(s)))
+ QMessageBox::information(this, "qconf", "Unable to save configuration!");
+}
+
+void ConfigMainWindow::changeMenu(struct menu *menu)
+{
+ configList->setRootMenu(menu);
+ backAction->setEnabled(TRUE);
+}
+
+void ConfigMainWindow::listFocusChanged(void)
+{
+ if (menuList->hasFocus()) {
+ if (menuList->mode == menuMode)
+ configList->clearSelection();
+ setHelp(menuList->selectedItem());
+ } else if (configList->hasFocus()) {
+ setHelp(configList->selectedItem());
+ }
+}
+
+void ConfigMainWindow::goBack(void)
+{
+ ConfigItem* item;
+
+ configList->setParentMenu();
+ if (configList->rootEntry == &rootmenu)
+ backAction->setEnabled(FALSE);
+ item = (ConfigItem*)menuList->selectedItem();
+ while (item) {
+ if (item->menu == configList->rootEntry) {
+ menuList->setSelected(item, TRUE);
+ break;
+ }
+ item = (ConfigItem*)item->parent();
+ }
+}
+
+void ConfigMainWindow::showSingleView(void)
+{
+ menuView->hide();
+ menuList->setRootMenu(0);
+ configList->mode = singleMode;
+ if (configList->rootEntry == &rootmenu)
+ configList->updateListAll();
+ else
+ configList->setRootMenu(&rootmenu);
+ configList->setAllOpen(TRUE);
+ configList->setFocus();
+}
+
+void ConfigMainWindow::showSplitView(void)
+{
+ configList->mode = symbolMode;
+ if (configList->rootEntry == &rootmenu)
+ configList->updateListAll();
+ else
+ configList->setRootMenu(&rootmenu);
+ configList->setAllOpen(TRUE);
+ configApp->processEvents();
+ menuList->mode = menuMode;
+ menuList->setRootMenu(&rootmenu);
+ menuList->setAllOpen(TRUE);
+ menuView->show();
+ menuList->setFocus();
+}
+
+void ConfigMainWindow::showFullView(void)
+{
+ menuView->hide();
+ menuList->setRootMenu(0);
+ configList->mode = fullMode;
+ if (configList->rootEntry == &rootmenu)
+ configList->updateListAll();
+ else
+ configList->setRootMenu(&rootmenu);
+ configList->setAllOpen(FALSE);
+ configList->setFocus();
+}
+
+void ConfigMainWindow::setShowAll(bool b)
+{
+ if (configList->showAll == b)
+ return;
+ configList->showAll = b;
+ configList->updateListAll();
+ menuList->showAll = b;
+ menuList->updateListAll();
+}
+
+void ConfigMainWindow::setShowDebug(bool b)
+{
+ if (showDebug == b)
+ return;
+ showDebug = b;
+}
+
+void ConfigMainWindow::setShowName(bool b)
+{
+ if (configList->showName == b)
+ return;
+ configList->showName = b;
+ configList->reinit();
+ menuList->showName = b;
+ menuList->reinit();
+}
+
+void ConfigMainWindow::setShowRange(bool b)
+{
+ if (configList->showRange == b)
+ return;
+ configList->showRange = b;
+ configList->reinit();
+ menuList->showRange = b;
+ menuList->reinit();
+}
+
+void ConfigMainWindow::setShowData(bool b)
+{
+ if (configList->showData == b)
+ return;
+ configList->showData = b;
+ configList->reinit();
+ menuList->showData = b;
+ menuList->reinit();
+}
+
+/*
+ * ask for saving configuration before quitting
+ * TODO ask only when something changed
+ */
+void ConfigMainWindow::closeEvent(QCloseEvent* e)
+{
+ if (!sym_change_count) {
+ e->accept();
+ return;
+ }
+ QMessageBox mb("qconf", "Save configuration?", QMessageBox::Warning,
+ QMessageBox::Yes | QMessageBox::Default, QMessageBox::No, QMessageBox::Cancel | QMessageBox::Escape);
+ mb.setButtonText(QMessageBox::Yes, "&Save Changes");
+ mb.setButtonText(QMessageBox::No, "&Discard Changes");
+ mb.setButtonText(QMessageBox::Cancel, "Cancel Exit");
+ switch (mb.exec()) {
+ case QMessageBox::Yes:
+ conf_write(NULL);
+ case QMessageBox::No:
+ e->accept();
+ break;
+ case QMessageBox::Cancel:
+ e->ignore();
+ break;
+ }
+}
+
+void ConfigMainWindow::showIntro(void)
+{
+ static char str[] = "Welcome to the qconf graphical kernel configuration tool for Linux.\n\n"
+ "For each option, a blank box indicates the feature is disabled, a check\n"
+ "indicates it is enabled, and a dot indicates that it is to be compiled\n"
+ "as a module. Clicking on the box will cycle through the three states.\n\n"
+ "If you do not see an option (e.g., a device driver) that you believe\n"
+ "should be present, try turning on Show All Options under the Options menu.\n"
+ "Although there is no cross reference yet to help you figure out what other\n"
+ "options must be enabled to support the option you are interested in, you can\n"
+ "still view the help of a grayed-out option.\n\n"
+ "Toggling Show Debug Info under the Options menu will show the dependencies,\n"
+ "which you can then match by examining other options.\n\n";
+
+ QMessageBox::information(this, "qconf", str);
+}
+
+void ConfigMainWindow::showAbout(void)
+{
+ static char str[] = "qconf is Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>.\n\n"
+ "Bug reports and feature request can also be entered at http://bugzilla.kernel.org/\n";
+
+ QMessageBox::information(this, "qconf", str);
+}
+
+void ConfigMainWindow::saveSettings(void)
+{
+#if QT_VERSION >= 300
+ ConfigSettings *configSettings = new ConfigSettings;
+ configSettings->writeEntry("/kconfig/qconf/window x", pos().x());
+ configSettings->writeEntry("/kconfig/qconf/window y", pos().y());
+ configSettings->writeEntry("/kconfig/qconf/window width", size().width());
+ configSettings->writeEntry("/kconfig/qconf/window height", size().height());
+ configSettings->writeEntry("/kconfig/qconf/showName", configList->showName);
+ configSettings->writeEntry("/kconfig/qconf/showRange", configList->showRange);
+ configSettings->writeEntry("/kconfig/qconf/showData", configList->showData);
+ configSettings->writeEntry("/kconfig/qconf/showAll", configList->showAll);
+ configSettings->writeEntry("/kconfig/qconf/showDebug", showDebug);
+
+ QString entry;
+ switch(configList->mode) {
+ case singleMode :
+ entry = "single";
+ break;
+
+ case symbolMode :
+ entry = "split";
+ break;
+
+ case fullMode :
+ entry = "full";
+ break;
+ }
+ configSettings->writeEntry("/kconfig/qconf/listMode", entry);
+
+ configSettings->writeSizes("/kconfig/qconf/split1", split1->sizes());
+ configSettings->writeSizes("/kconfig/qconf/split2", split2->sizes());
+
+ delete configSettings;
+#endif
+}
+
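+/*
+ * Mark every menu (and every top-level entry outside a menu) with MENU_ROOT
+ * so the split view can tell which entries belong in the menu pane.
+ */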
+void fixup_rootmenu(struct menu *menu)
+{
+ struct menu *child;
+ static int menu_cnt = 0;
+
+ menu->flags |= MENU_ROOT;
+ for (child = menu->list; child; child = child->next) {
+ if (child->prompt && child->prompt->type == P_MENU) {
+ menu_cnt++;
+ fixup_rootmenu(child);
+ menu_cnt--;
+ } else if (!menu_cnt)
+ fixup_rootmenu(child);
+ }
+}
+
+static const char *progname;
+
+static void usage(void)
+{
+ printf("%s <config>\n", progname);
+ exit(0);
+}
+
+int main(int ac, char** av)
+{
+ ConfigMainWindow* v;
+ const char *name;
+
+ bindtextdomain(PACKAGE, LOCALEDIR);
+ textdomain(PACKAGE);
+
+#ifndef LKC_DIRECT_LINK
+ kconfig_load();
+#endif
+
+ progname = av[0];
+ configApp = new QApplication(ac, av);
+ if (ac > 1 && av[1][0] == '-') {
+ switch (av[1][1]) {
+ case 'h':
+ case '?':
+ usage();
+ }
+ name = av[2];
+ } else
+ name = av[1];
+ if (!name)
+ usage();
+
+ conf_parse(name);
+ fixup_rootmenu(&rootmenu);
+ conf_read(NULL);
+ //zconfdump(stdout);
+
+ v = new ConfigMainWindow();
+
+ //zconfdump(stdout);
+ v->show();
+ configApp->connect(configApp, SIGNAL(lastWindowClosed()), SLOT(quit()));
+ configApp->connect(configApp, SIGNAL(aboutToQuit()), v, SLOT(saveSettings()));
+ configApp->exec();
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#include <qlistview.h>
+#if QT_VERSION >= 300
+#include <qsettings.h>
+#else
+class QSettings { };
+#endif
+
+class ConfigList;
+class ConfigItem;
+class ConfigLineEdit;
+class ConfigMainWindow;
+
+
+class ConfigSettings : public QSettings {
+public:
+ ConfigSettings();
+
+#if QT_VERSION >= 300
+ void readListSettings();
+ QValueList<int> readSizes(const QString& key, bool *ok);
+ bool writeSizes(const QString& key, const QValueList<int>& value);
+#endif
+
+ bool showAll;
+ bool showName;
+ bool showRange;
+ bool showData;
+};
+
+class ConfigView : public QVBox {
+ Q_OBJECT
+ typedef class QVBox Parent;
+public:
+ ConfigView(QWidget* parent, ConfigMainWindow* cview, ConfigSettings* configSettings);
+ ~ConfigView(void);
+ static void updateList(ConfigItem* item);
+ static void updateListAll(void);
+
+public:
+ ConfigList* list;
+ ConfigLineEdit* lineEdit;
+
+ static ConfigView* viewList;
+ ConfigView* nextView;
+};
+
+enum colIdx {
+ promptColIdx, nameColIdx, noColIdx, modColIdx, yesColIdx, dataColIdx, colNr
+};
+enum listMode {
+ singleMode, menuMode, symbolMode, fullMode
+};
+
+class ConfigList : public QListView {
+ Q_OBJECT
+ typedef class QListView Parent;
+public:
+ ConfigList(ConfigView* p, ConfigMainWindow* cview, ConfigSettings *configSettings);
+ void reinit(void);
+ ConfigView* parent(void) const
+ {
+ return (ConfigView*)Parent::parent();
+ }
+
+protected:
+ ConfigMainWindow* cview;
+
+ void keyPressEvent(QKeyEvent *e);
+ void contentsMousePressEvent(QMouseEvent *e);
+ void contentsMouseReleaseEvent(QMouseEvent *e);
+ void contentsMouseMoveEvent(QMouseEvent *e);
+ void contentsMouseDoubleClickEvent(QMouseEvent *e);
+ void focusInEvent(QFocusEvent *e);
+public slots:
+ void setRootMenu(struct menu *menu);
+
+ void updateList(ConfigItem *item);
+ void setValue(ConfigItem* item, tristate val);
+ void changeValue(ConfigItem* item);
+ void updateSelection(void);
+signals:
+ void menuSelected(struct menu *menu);
+ void parentSelected(void);
+ void gotFocus(void);
+
+public:
+ void updateListAll(void)
+ {
+ updateAll = true;
+ updateList(NULL);
+ updateAll = false;
+ }
+ ConfigList* listView()
+ {
+ return this;
+ }
+ ConfigItem* firstChild() const
+ {
+ return (ConfigItem *)Parent::firstChild();
+ }
+ int mapIdx(colIdx idx)
+ {
+ return colMap[idx];
+ }
+ void addColumn(colIdx idx, const QString& label)
+ {
+ colMap[idx] = Parent::addColumn(label);
+ colRevMap[colMap[idx]] = idx;
+ }
+ void removeColumn(colIdx idx)
+ {
+ int col = colMap[idx];
+ if (col >= 0) {
+ Parent::removeColumn(col);
+ colRevMap[col] = colMap[idx] = -1;
+ }
+ }
+ void setAllOpen(bool open);
+ void setParentMenu(void);
+
+ template <class P>
+ void updateMenuList(P*, struct menu*);
+
+ bool updateAll;
+
+ QPixmap symbolYesPix, symbolModPix, symbolNoPix;
+ QPixmap choiceYesPix, choiceNoPix;
+ QPixmap menuPix, menuInvPix, menuBackPix, voidPix;
+
+ bool showAll, showName, showRange, showData;
+ enum listMode mode;
+ struct menu *rootEntry;
+ QColorGroup disabledColorGroup;
+ QColorGroup inactivedColorGroup;
+
+private:
+ int colMap[colNr];
+ int colRevMap[colNr];
+};
+
+class ConfigItem : public QListViewItem {
+ typedef class QListViewItem Parent;
+public:
+ ConfigItem(QListView *parent, ConfigItem *after, struct menu *m, bool v)
+ : Parent(parent, after), menu(m), visible(v), goParent(false)
+ {
+ init();
+ }
+ ConfigItem(ConfigItem *parent, ConfigItem *after, struct menu *m, bool v)
+ : Parent(parent, after), menu(m), visible(v), goParent(false)
+ {
+ init();
+ }
+ ConfigItem(QListView *parent, ConfigItem *after, bool v)
+ : Parent(parent, after), menu(0), visible(v), goParent(true)
+ {
+ init();
+ }
+ ~ConfigItem(void);
+ void init(void);
+#if QT_VERSION >= 300
+ void okRename(int col);
+#endif
+ void updateMenu(void);
+ void testUpdateMenu(bool v);
+ ConfigList* listView() const
+ {
+ return (ConfigList*)Parent::listView();
+ }
+ ConfigItem* firstChild() const
+ {
+ return (ConfigItem *)Parent::firstChild();
+ }
+ ConfigItem* nextSibling() const
+ {
+ return (ConfigItem *)Parent::nextSibling();
+ }
+ void setText(colIdx idx, const QString& text)
+ {
+ Parent::setText(listView()->mapIdx(idx), text);
+ }
+ QString text(colIdx idx) const
+ {
+ return Parent::text(listView()->mapIdx(idx));
+ }
+ void setPixmap(colIdx idx, const QPixmap& pm)
+ {
+ Parent::setPixmap(listView()->mapIdx(idx), pm);
+ }
+ const QPixmap* pixmap(colIdx idx) const
+ {
+ return Parent::pixmap(listView()->mapIdx(idx));
+ }
+ void paintCell(QPainter* p, const QColorGroup& cg, int column, int width, int align);
+
+ ConfigItem* nextItem;
+ struct menu *menu;
+ bool visible;
+ bool goParent;
+};
+
+class ConfigLineEdit : public QLineEdit {
+ Q_OBJECT
+ typedef class QLineEdit Parent;
+public:
+ ConfigLineEdit(ConfigView* parent)
+ : Parent(parent)
+ { }
+ ConfigView* parent(void) const
+ {
+ return (ConfigView*)Parent::parent();
+ }
+ void show(ConfigItem *i);
+ void keyPressEvent(QKeyEvent *e);
+
+public:
+ ConfigItem *item;
+};
+
+class ConfigMainWindow : public QMainWindow {
+ Q_OBJECT
+public:
+ ConfigMainWindow(void);
+public slots:
+ void setHelp(QListViewItem* item);
+ void changeMenu(struct menu *);
+ void listFocusChanged(void);
+ void goBack(void);
+ void loadConfig(void);
+ void saveConfig(void);
+ void saveConfigAs(void);
+ void showSingleView(void);
+ void showSplitView(void);
+ void showFullView(void);
+ void setShowAll(bool);
+ void setShowDebug(bool);
+ void setShowRange(bool);
+ void setShowName(bool);
+ void setShowData(bool);
+ void showIntro(void);
+ void showAbout(void);
+ void saveSettings(void);
+
+protected:
+ void closeEvent(QCloseEvent *e);
+
+ ConfigView *menuView;
+ ConfigList *menuList;
+ ConfigView *configView;
+ ConfigList *configList;
+ QTextView *helpText;
+ QToolBar *toolBar;
+ QAction *backAction;
+ QSplitter* split1;
+ QSplitter* split2;
+
+ bool showDebug;
+};
--- /dev/null
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <regex.h>
+#include <sys/utsname.h>
+
+#define LKC_DIRECT_LINK
+#include "lkc.h"
+
+struct symbol symbol_yes = {
+ .name = "y",
+ .curr = { "y", yes },
+ .flags = SYMBOL_YES|SYMBOL_VALID,
+}, symbol_mod = {
+ .name = "m",
+ .curr = { "m", mod },
+ .flags = SYMBOL_MOD|SYMBOL_VALID,
+}, symbol_no = {
+ .name = "n",
+ .curr = { "n", no },
+ .flags = SYMBOL_NO|SYMBOL_VALID,
+}, symbol_empty = {
+ .name = "",
+ .curr = { "", no },
+ .flags = SYMBOL_VALID,
+};
+
+int sym_change_count;
+struct symbol *modules_sym;
+tristate modules_val;
+
+void sym_add_default(struct symbol *sym, const char *def)
+{
+ struct property *prop = prop_alloc(P_DEFAULT, sym);
+
+ prop->expr = expr_alloc_symbol(sym_lookup(def, 1));
+}
+
+void sym_init(void)
+{
+ struct symbol *sym;
+ struct utsname uts;
+ char *p;
+ static bool inited = false;
+
+ if (inited)
+ return;
+ inited = true;
+
+ uname(&uts);
+
+ sym = sym_lookup("ARCH", 0);
+ sym->type = S_STRING;
+ sym->flags |= SYMBOL_AUTO;
+ p = getenv("ARCH");
+ if (p)
+ sym_add_default(sym, p);
+
+ sym = sym_lookup("KERNELVERSION", 0);
+ sym->type = S_STRING;
+ sym->flags |= SYMBOL_AUTO;
+ p = getenv("KERNELVERSION");
+ if (p)
+ sym_add_default(sym, p);
+
+ sym = sym_lookup("UNAME_RELEASE", 0);
+ sym->type = S_STRING;
+ sym->flags |= SYMBOL_AUTO;
+ sym_add_default(sym, uts.release);
+}
+
+enum symbol_type sym_get_type(struct symbol *sym)
+{
+ enum symbol_type type = sym->type;
+
+ if (type == S_TRISTATE) {
+ if (sym_is_choice_value(sym) && sym->visible == yes)
+ type = S_BOOLEAN;
+ else if (modules_val == no)
+ type = S_BOOLEAN;
+ }
+ return type;
+}
+
+const char *sym_type_name(enum symbol_type type)
+{
+ switch (type) {
+ case S_BOOLEAN:
+ return "boolean";
+ case S_TRISTATE:
+ return "tristate";
+ case S_INT:
+ return "integer";
+ case S_HEX:
+ return "hex";
+ case S_STRING:
+ return "string";
+ case S_UNKNOWN:
+ return "unknown";
+ case S_OTHER:
+ break;
+ }
+ return "???";
+}
+
+struct property *sym_get_choice_prop(struct symbol *sym)
+{
+ struct property *prop;
+
+ for_all_choices(sym, prop)
+ return prop;
+ return NULL;
+}
+
+struct property *sym_get_default_prop(struct symbol *sym)
+{
+ struct property *prop;
+
+ for_all_defaults(sym, prop) {
+ prop->visible.tri = expr_calc_value(prop->visible.expr);
+ if (prop->visible.tri != no)
+ return prop;
+ }
+ return NULL;
+}
+
+struct property *sym_get_range_prop(struct symbol *sym)
+{
+ struct property *prop;
+
+ for_all_properties(sym, prop, P_RANGE) {
+ prop->visible.tri = expr_calc_value(prop->visible.expr);
+ if (prop->visible.tri != no)
+ return prop;
+ }
+ return NULL;
+}
+
+static int sym_get_range_val(struct symbol *sym, int base)
+{
+ sym_calc_value(sym);
+ switch (sym->type) {
+ case S_INT:
+ base = 10;
+ break;
+ case S_HEX:
+ base = 16;
+ break;
+ default:
+ break;
+ }
+ return strtol(sym->curr.val, NULL, base);
+}
+
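+/*
+ * If the current int/hex value lies outside the symbol's range property,
+ * clamp it to the violated bound.
+ */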
+static void sym_validate_range(struct symbol *sym)
+{
+ struct property *prop;
+ int base, val, val2;
+ char str[64];
+
+ switch (sym->type) {
+ case S_INT:
+ base = 10;
+ break;
+ case S_HEX:
+ base = 16;
+ break;
+ default:
+ return;
+ }
+ prop = sym_get_range_prop(sym);
+ if (!prop)
+ return;
+ val = strtol(sym->curr.val, NULL, base);
+ val2 = sym_get_range_val(prop->expr->left.sym, base);
+ if (val >= val2) {
+ val2 = sym_get_range_val(prop->expr->right.sym, base);
+ if (val <= val2)
+ return;
+ }
+ if (sym->type == S_INT)
+ sprintf(str, "%d", val2);
+ else
+ sprintf(str, "0x%x", val2);
+ sym->curr.val = strdup(str);
+}
+
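+/*
+ * Recompute how visible a symbol is (the OR of all its prompt conditions)
+ * and the value forced on it by reverse dependencies from 'select'.
+ */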
+static void sym_calc_visibility(struct symbol *sym)
+{
+ struct property *prop;
+ tristate tri;
+
+ /* any prompt visible? */
+ tri = no;
+ for_all_prompts(sym, prop) {
+ prop->visible.tri = expr_calc_value(prop->visible.expr);
+ tri = E_OR(tri, prop->visible.tri);
+ }
+ if (tri == mod && (sym->type != S_TRISTATE || modules_val == no))
+ tri = yes;
+ if (sym->visible != tri) {
+ sym->visible = tri;
+ sym_set_changed(sym);
+ }
+ if (sym_is_choice_value(sym))
+ return;
+ tri = no;
+ if (sym->rev_dep.expr)
+ tri = expr_calc_value(sym->rev_dep.expr);
+ if (tri == mod && sym_get_type(sym) == S_BOOLEAN)
+ tri = yes;
+ if (sym->rev_dep.tri != tri) {
+ sym->rev_dep.tri = tri;
+ sym_set_changed(sym);
+ }
+}
+
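+/*
+ * Pick the active value of a choice: the user's selection if it is still
+ * visible, otherwise the first visible default, otherwise the first
+ * visible choice value.
+ */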
+static struct symbol *sym_calc_choice(struct symbol *sym)
+{
+ struct symbol *def_sym;
+ struct property *prop;
+ struct expr *e;
+
+ /* is the user choice visible? */
+ def_sym = sym->user.val;
+ if (def_sym) {
+ sym_calc_visibility(def_sym);
+ if (def_sym->visible != no)
+ return def_sym;
+ }
+
+ /* any of the defaults visible? */
+ for_all_defaults(sym, prop) {
+ prop->visible.tri = expr_calc_value(prop->visible.expr);
+ if (prop->visible.tri == no)
+ continue;
+ def_sym = prop_get_symbol(prop);
+ sym_calc_visibility(def_sym);
+ if (def_sym->visible != no)
+ return def_sym;
+ }
+
+ /* just get the first visible value */
+ prop = sym_get_choice_prop(sym);
+ for (e = prop->expr; e; e = e->left.expr) {
+ def_sym = e->right.sym;
+ sym_calc_visibility(def_sym);
+ if (def_sym->visible != no)
+ return def_sym;
+ }
+
+ /* no choice? reset tristate value */
+ sym->curr.tri = no;
+ return NULL;
+}
+
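+/*
+ * Recompute a symbol's current value from its visibility, user value,
+ * defaults and reverse dependencies, then re-validate ranges and propagate
+ * change flags to choice values.
+ */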
+void sym_calc_value(struct symbol *sym)
+{
+ struct symbol_value newval, oldval;
+ struct property *prop;
+ struct expr *e;
+
+ if (!sym)
+ return;
+
+ if (sym->flags & SYMBOL_VALID)
+ return;
+ sym->flags |= SYMBOL_VALID;
+
+ oldval = sym->curr;
+
+ switch (sym->type) {
+ case S_INT:
+ case S_HEX:
+ case S_STRING:
+ newval = symbol_empty.curr;
+ break;
+ case S_BOOLEAN:
+ case S_TRISTATE:
+ newval = symbol_no.curr;
+ break;
+ default:
+ sym->curr.val = sym->name;
+ sym->curr.tri = no;
+ return;
+ }
+ if (!sym_is_choice_value(sym))
+ sym->flags &= ~SYMBOL_WRITE;
+
+ sym_calc_visibility(sym);
+
+ /* set default if recursively called */
+ sym->curr = newval;
+
+ switch (sym_get_type(sym)) {
+ case S_BOOLEAN:
+ case S_TRISTATE:
+ if (sym_is_choice_value(sym) && sym->visible == yes) {
+ prop = sym_get_choice_prop(sym);
+ newval.tri = (prop_get_symbol(prop)->curr.val == sym) ? yes : no;
+ } else if (E_OR(sym->visible, sym->rev_dep.tri) != no) {
+ sym->flags |= SYMBOL_WRITE;
+ if (sym_has_value(sym))
+ newval.tri = sym->user.tri;
+ else if (!sym_is_choice(sym)) {
+ prop = sym_get_default_prop(sym);
+ if (prop)
+ newval.tri = expr_calc_value(prop->expr);
+ }
+ newval.tri = E_OR(E_AND(newval.tri, sym->visible), sym->rev_dep.tri);
+ } else if (!sym_is_choice(sym)) {
+ prop = sym_get_default_prop(sym);
+ if (prop) {
+ sym->flags |= SYMBOL_WRITE;
+ newval.tri = expr_calc_value(prop->expr);
+ }
+ }
+ if (newval.tri == mod && sym_get_type(sym) == S_BOOLEAN)
+ newval.tri = yes;
+ break;
+ case S_STRING:
+ case S_HEX:
+ case S_INT:
+ if (sym->visible != no) {
+ sym->flags |= SYMBOL_WRITE;
+ if (sym_has_value(sym)) {
+ newval.val = sym->user.val;
+ break;
+ }
+ }
+ prop = sym_get_default_prop(sym);
+ if (prop) {
+ struct symbol *ds = prop_get_symbol(prop);
+ if (ds) {
+ sym->flags |= SYMBOL_WRITE;
+ sym_calc_value(ds);
+ newval.val = ds->curr.val;
+ }
+ }
+ break;
+ default:
+ ;
+ }
+
+ sym->curr = newval;
+ if (sym_is_choice(sym) && newval.tri == yes)
+ sym->curr.val = sym_calc_choice(sym);
+ sym_validate_range(sym);
+
+ if (memcmp(&oldval, &sym->curr, sizeof(oldval)))
+ sym_set_changed(sym);
+ if (modules_sym == sym)
+ modules_val = modules_sym->curr.tri;
+
+ if (sym_is_choice(sym)) {
+ int flags = sym->flags & (SYMBOL_CHANGED | SYMBOL_WRITE);
+ prop = sym_get_choice_prop(sym);
+ for (e = prop->expr; e; e = e->left.expr) {
+ e->right.sym->flags |= flags;
+ if (flags & SYMBOL_CHANGED)
+ sym_set_changed(e->right.sym);
+ }
+ }
+}
+
+void sym_clear_all_valid(void)
+{
+ struct symbol *sym;
+ int i;
+
+ for_all_symbols(i, sym)
+ sym->flags &= ~SYMBOL_VALID;
+ sym_change_count++;
+ if (modules_sym)
+ sym_calc_value(modules_sym);
+}
+
+void sym_set_changed(struct symbol *sym)
+{
+ struct property *prop;
+
+ sym->flags |= SYMBOL_CHANGED;
+ for (prop = sym->prop; prop; prop = prop->next) {
+ if (prop->menu)
+ prop->menu->flags |= MENU_CHANGED;
+ }
+}
+
+void sym_set_all_changed(void)
+{
+ struct symbol *sym;
+ int i;
+
+ for_all_symbols(i, sym)
+ sym_set_changed(sym);
+}
+
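+/*
+ * A tristate value is acceptable only if it lies between the value forced
+ * by reverse dependencies (the lower bound) and the symbol's visibility
+ * (the upper bound).
+ */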
+bool sym_tristate_within_range(struct symbol *sym, tristate val)
+{
+ int type = sym_get_type(sym);
+
+ if (sym->visible == no)
+ return false;
+
+ if (type != S_BOOLEAN && type != S_TRISTATE)
+ return false;
+
+ if (type == S_BOOLEAN && val == mod)
+ return false;
+ if (sym->visible <= sym->rev_dep.tri)
+ return false;
+ if (sym_is_choice_value(sym) && sym->visible == yes)
+ return val == yes;
+ return val >= sym->rev_dep.tri && val <= sym->visible;
+}
+
+bool sym_set_tristate_value(struct symbol *sym, tristate val)
+{
+ tristate oldval = sym_get_tristate_value(sym);
+
+ if (oldval != val && !sym_tristate_within_range(sym, val))
+ return false;
+
+ if (sym->flags & SYMBOL_NEW) {
+ sym->flags &= ~SYMBOL_NEW;
+ sym_set_changed(sym);
+ }
+ /*
+ * setting a choice value also resets the new flag of the choice
+ * symbol and all other choice values.
+ */
+ if (sym_is_choice_value(sym) && val == yes) {
+ struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym));
+ struct property *prop;
+ struct expr *e;
+
+ cs->user.val = sym;
+ cs->flags &= ~SYMBOL_NEW;
+ prop = sym_get_choice_prop(cs);
+ for (e = prop->expr; e; e = e->left.expr) {
+ if (e->right.sym->visible != no)
+ e->right.sym->flags &= ~SYMBOL_NEW;
+ }
+ }
+
+ sym->user.tri = val;
+ if (oldval != val) {
+ sym_clear_all_valid();
+ if (sym == modules_sym)
+ sym_set_all_changed();
+ }
+
+ return true;
+}
+
+tristate sym_toggle_tristate_value(struct symbol *sym)
+{
+ tristate oldval, newval;
+
+ oldval = newval = sym_get_tristate_value(sym);
+ do {
+ switch (newval) {
+ case no:
+ newval = mod;
+ break;
+ case mod:
+ newval = yes;
+ break;
+ case yes:
+ newval = no;
+ break;
+ }
+ if (sym_set_tristate_value(sym, newval))
+ break;
+ } while (oldval != newval);
+ return newval;
+}
+
+bool sym_string_valid(struct symbol *sym, const char *str)
+{
+ signed char ch;
+
+ switch (sym->type) {
+ case S_STRING:
+ return true;
+ case S_INT:
+ ch = *str++;
+ if (ch == '-')
+ ch = *str++;
+ if (!isdigit(ch))
+ return false;
+ if (ch == '0' && *str != 0)
+ return false;
+ while ((ch = *str++)) {
+ if (!isdigit(ch))
+ return false;
+ }
+ return true;
+ case S_HEX:
+ if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
+ str += 2;
+ ch = *str++;
+ do {
+ if (!isxdigit(ch))
+ return false;
+ } while ((ch = *str++));
+ return true;
+ case S_BOOLEAN:
+ case S_TRISTATE:
+ switch (str[0]) {
+ case 'y': case 'Y':
+ case 'm': case 'M':
+ case 'n': case 'N':
+ return true;
+ }
+ return false;
+ default:
+ return false;
+ }
+}
+
+bool sym_string_within_range(struct symbol *sym, const char *str)
+{
+ struct property *prop;
+ int val;
+
+ switch (sym->type) {
+ case S_STRING:
+ return sym_string_valid(sym, str);
+ case S_INT:
+ if (!sym_string_valid(sym, str))
+ return false;
+ prop = sym_get_range_prop(sym);
+ if (!prop)
+ return true;
+ val = strtol(str, NULL, 10);
+ return val >= sym_get_range_val(prop->expr->left.sym, 10) &&
+ val <= sym_get_range_val(prop->expr->right.sym, 10);
+ case S_HEX:
+ if (!sym_string_valid(sym, str))
+ return false;
+ prop = sym_get_range_prop(sym);
+ if (!prop)
+ return true;
+ val = strtol(str, NULL, 16);
+ return val >= sym_get_range_val(prop->expr->left.sym, 16) &&
+ val <= sym_get_range_val(prop->expr->right.sym, 16);
+ case S_BOOLEAN:
+ case S_TRISTATE:
+ switch (str[0]) {
+ case 'y': case 'Y':
+ return sym_tristate_within_range(sym, yes);
+ case 'm': case 'M':
+ return sym_tristate_within_range(sym, mod);
+ case 'n': case 'N':
+ return sym_tristate_within_range(sym, no);
+ }
+ return false;
+ default:
+ return false;
+ }
+}
+
+bool sym_set_string_value(struct symbol *sym, const char *newval)
+{
+ const char *oldval;
+ char *val;
+ int size;
+
+ switch (sym->type) {
+ case S_BOOLEAN:
+ case S_TRISTATE:
+ switch (newval[0]) {
+ case 'y': case 'Y':
+ return sym_set_tristate_value(sym, yes);
+ case 'm': case 'M':
+ return sym_set_tristate_value(sym, mod);
+ case 'n': case 'N':
+ return sym_set_tristate_value(sym, no);
+ }
+ return false;
+ default:
+ ;
+ }
+
+ if (!sym_string_within_range(sym, newval))
+ return false;
+
+ if (sym->flags & SYMBOL_NEW) {
+ sym->flags &= ~SYMBOL_NEW;
+ sym_set_changed(sym);
+ }
+
+ oldval = sym->user.val;
+ size = strlen(newval) + 1;
+ if (sym->type == S_HEX && (newval[0] != '0' || (newval[1] != 'x' && newval[1] != 'X'))) {
+ size += 2;
+ sym->user.val = val = malloc(size);
+ *val++ = '0';
+ *val++ = 'x';
+ } else if (!oldval || strcmp(oldval, newval))
+ sym->user.val = val = malloc(size);
+ else
+ return true;
+
+ strcpy(val, newval);
+ free((void *)oldval);
+ sym_clear_all_valid();
+
+ return true;
+}
+
+const char *sym_get_string_value(struct symbol *sym)
+{
+ tristate val;
+
+ switch (sym->type) {
+ case S_BOOLEAN:
+ case S_TRISTATE:
+ val = sym_get_tristate_value(sym);
+ switch (val) {
+ case no:
+ return "n";
+ case mod:
+ return "m";
+ case yes:
+ return "y";
+ }
+ break;
+ default:
+ ;
+ }
+ return (const char *)sym->curr.val;
+}
+
+bool sym_is_changable(struct symbol *sym)
+{
+ return sym->visible > sym->rev_dep.tri;
+}
+
+struct symbol *sym_lookup(const char *name, int isconst)
+{
+ struct symbol *symbol;
+ const char *ptr;
+ char *new_name;
+ int hash = 0;
+
+ if (name) {
+ if (name[0] && !name[1]) {
+ switch (name[0]) {
+ case 'y': return &symbol_yes;
+ case 'm': return &symbol_mod;
+ case 'n': return &symbol_no;
+ }
+ }
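+ /* simple additive hash over the name, folded into 256 buckets; bucket 256 is reserved for unnamed symbols */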
+ for (ptr = name; *ptr; ptr++)
+ hash += *ptr;
+ hash &= 0xff;
+
+ for (symbol = symbol_hash[hash]; symbol; symbol = symbol->next) {
+ if (!strcmp(symbol->name, name)) {
+ if ((isconst && symbol->flags & SYMBOL_CONST) ||
+ (!isconst && !(symbol->flags & SYMBOL_CONST)))
+ return symbol;
+ }
+ }
+ new_name = strdup(name);
+ } else {
+ new_name = NULL;
+ hash = 256;
+ }
+
+ symbol = malloc(sizeof(*symbol));
+ memset(symbol, 0, sizeof(*symbol));
+ symbol->name = new_name;
+ symbol->type = S_UNKNOWN;
+ symbol->flags = SYMBOL_NEW;
+ if (isconst)
+ symbol->flags |= SYMBOL_CONST;
+
+ symbol->next = symbol_hash[hash];
+ symbol_hash[hash] = symbol;
+
+ return symbol;
+}
+
+struct symbol *sym_find(const char *name)
+{
+ struct symbol *symbol = NULL;
+ const char *ptr;
+ int hash = 0;
+
+ if (!name)
+ return NULL;
+
+ if (name[0] && !name[1]) {
+ switch (name[0]) {
+ case 'y': return &symbol_yes;
+ case 'm': return &symbol_mod;
+ case 'n': return &symbol_no;
+ }
+ }
+ for (ptr = name; *ptr; ptr++)
+ hash += *ptr;
+ hash &= 0xff;
+
+ for (symbol = symbol_hash[hash]; symbol; symbol = symbol->next) {
+ if (!strcmp(symbol->name, name) &&
+ !(symbol->flags & SYMBOL_CONST))
+ break;
+ }
+
+ return symbol;
+}
+
+struct symbol **sym_re_search(const char *pattern)
+{
+ struct symbol *sym, **sym_arr = NULL;
+ int i, cnt, size;
+ regex_t re;
+
+ cnt = size = 0;
+ /* Skip if empty */
+ if (strlen(pattern) == 0)
+ return NULL;
+ if (regcomp(&re, pattern, REG_EXTENDED|REG_NOSUB|REG_ICASE))
+ return NULL;
+
+ for_all_symbols(i, sym) {
+ if (sym->flags & SYMBOL_CONST || !sym->name)
+ continue;
+ if (regexec(&re, sym->name, 0, NULL, 0))
+ continue;
+ if (cnt + 1 >= size) {
+ void *tmp = sym_arr;
+ size += 16;
+ sym_arr = realloc(sym_arr, size * sizeof(struct symbol *));
+ if (!sym_arr) {
+ free(tmp);
+ return NULL;
+ }
+ }
+ sym_arr[cnt++] = sym;
+ }
+ if (sym_arr)
+ sym_arr[cnt] = NULL;
+ regfree(&re);
+
+ return sym_arr;
+}
+
+
+struct symbol *sym_check_deps(struct symbol *sym);
+
+static struct symbol *sym_check_expr_deps(struct expr *e)
+{
+ struct symbol *sym;
+
+ if (!e)
+ return NULL;
+ switch (e->type) {
+ case E_OR:
+ case E_AND:
+ sym = sym_check_expr_deps(e->left.expr);
+ if (sym)
+ return sym;
+ return sym_check_expr_deps(e->right.expr);
+ case E_NOT:
+ return sym_check_expr_deps(e->left.expr);
+ case E_EQUAL:
+ case E_UNEQUAL:
+ sym = sym_check_deps(e->left.sym);
+ if (sym)
+ return sym;
+ return sym_check_deps(e->right.sym);
+ case E_SYMBOL:
+ return sym_check_deps(e->left.sym);
+ default:
+ break;
+ }
+ printf("Oops! How to check %d?\n", e->type);
+ return NULL;
+}
+
+struct symbol *sym_check_deps(struct symbol *sym)
+{
+ struct symbol *sym2;
+ struct property *prop;
+
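+ /* depth-first walk: SYMBOL_CHECK marks symbols on the current path (cycle detection), SYMBOL_CHECKED marks symbols already verified */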
+ if (sym->flags & SYMBOL_CHECK) {
+ printf("Warning! Found recursive dependency: %s", sym->name);
+ return sym;
+ }
+ if (sym->flags & SYMBOL_CHECKED)
+ return NULL;
+
+ sym->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED);
+ sym2 = sym_check_expr_deps(sym->rev_dep.expr);
+ if (sym2)
+ goto out;
+
+ for (prop = sym->prop; prop; prop = prop->next) {
+ if (prop->type == P_CHOICE || prop->type == P_SELECT)
+ continue;
+ sym2 = sym_check_expr_deps(prop->visible.expr);
+ if (sym2)
+ goto out;
+ if (prop->type != P_DEFAULT || sym_is_choice(sym))
+ continue;
+ sym2 = sym_check_expr_deps(prop->expr);
+ if (sym2)
+ goto out;
+ }
+out:
+ if (sym2) {
+ printf(" %s", sym->name);
+ if (sym2 == sym) {
+ printf("\n");
+ sym2 = NULL;
+ }
+ }
+ sym->flags &= ~SYMBOL_CHECK;
+ return sym2;
+}
+
+struct property *prop_alloc(enum prop_type type, struct symbol *sym)
+{
+ struct property *prop;
+ struct property **propp;
+
+ prop = malloc(sizeof(*prop));
+ memset(prop, 0, sizeof(*prop));
+ prop->type = type;
+ prop->sym = sym;
+ prop->file = current_file;
+ prop->lineno = zconf_lineno();
+
+ /* append property to the prop list of symbol */
+ if (sym) {
+ for (propp = &sym->prop; *propp; propp = &(*propp)->next)
+ ;
+ *propp = prop;
+ }
+
+ return prop;
+}
+
+struct symbol *prop_get_symbol(struct property *prop)
+{
+ if (prop->expr && (prop->expr->type == E_SYMBOL ||
+ prop->expr->type == E_CHOICE))
+ return prop->expr->left.sym;
+ return NULL;
+}
+
+const char *prop_get_type_name(enum prop_type type)
+{
+ switch (type) {
+ case P_PROMPT:
+ return "prompt";
+ case P_COMMENT:
+ return "comment";
+ case P_MENU:
+ return "menu";
+ case P_DEFAULT:
+ return "default";
+ case P_CHOICE:
+ return "choice";
+ case P_SELECT:
+ return "select";
+ case P_RANGE:
+ return "range";
+ case P_UNKNOWN:
+ break;
+ }
+ return "unknown";
+}
--- /dev/null
+/*
+ * Copyright (C) 2002-2005 Roman Zippel <zippel@linux-m68k.org>
+ * Copyright (C) 2002-2005 Sam Ravnborg <sam@ravnborg.org>
+ *
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#include <string.h>
+#include "lkc.h"
+
+/* file already present in list? If not add it */
+struct file *file_lookup(const char *name)
+{
+ struct file *file;
+
+ for (file = file_list; file; file = file->next) {
+ if (!strcmp(name, file->name))
+ return file;
+ }
+
+ file = malloc(sizeof(*file));
+ memset(file, 0, sizeof(*file));
+ file->name = strdup(name);
+ file->next = file_list;
+ file_list = file;
+ return file;
+}
+
+/* write a dependency file as used by kbuild to track dependencies */
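+/* The output is first written to "..config.tmp" and then renamed into place,
+   so an interrupted run never leaves a truncated dependency file behind. */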
+int file_write_dep(const char *name)
+{
+ struct file *file;
+ FILE *out;
+
+ if (!name)
+ name = ".kconfig.d";
+ out = fopen("..config.tmp", "w");
+ if (!out)
+ return 1;
+ fprintf(out, "deps_config := \\\n");
+ for (file = file_list; file; file = file->next) {
+ if (file->next)
+ fprintf(out, "\t%s \\\n", file->name);
+ else
+ fprintf(out, "\t%s\n", file->name);
+ }
+ fprintf(out, "\n.config include/lwk/autoconf.h: $(deps_config)\n\n$(deps_config):\n");
+ fclose(out);
+ rename("..config.tmp", name);
+ return 0;
+}
+
+
+/* Allocate initial growable string */
+struct gstr str_new(void)
+{
+ struct gstr gs;
+ gs.s = malloc(sizeof(char) * 64);
+ gs.len = 16;
+ strcpy(gs.s, "\0");
+ return gs;
+}
+
+/* Allocate and assign growable string */
+struct gstr str_assign(const char *s)
+{
+ struct gstr gs;
+ gs.s = strdup(s);
+ gs.len = strlen(s) + 1;
+ return gs;
+}
+
+/* Free storage for growable string */
+void str_free(struct gstr *gs)
+{
+ if (gs->s)
+ free(gs->s);
+ gs->s = NULL;
+ gs->len = 0;
+}
+
+/* Append to growable string */
+void str_append(struct gstr *gs, const char *s)
+{
+ size_t l = strlen(gs->s) + strlen(s) + 1;
+ if (l > gs->len) {
+ gs->s = realloc(gs->s, l);
+ gs->len = l;
+ }
+ strcat(gs->s, s);
+}
+
+/* Append printf formatted string to growable string */
+void str_printf(struct gstr *gs, const char *fmt, ...)
+{
+ va_list ap;
+ char s[10000]; /* big enough... */
+ va_start(ap, fmt);
+ vsnprintf(s, sizeof(s), fmt, ap);
+ str_append(gs, s);
+ va_end(ap);
+}
+
+/* Retrieve value of growable string */
+const char *str_get(struct gstr *gs)
+{
+ return gs->s;
+}
+
--- /dev/null
+%language=ANSI-C
+%define hash-function-name kconf_id_hash
+%define lookup-function-name kconf_id_lookup
+%define string-pool-name kconf_id_strings
+%compare-strncmp
+%enum
+%pic
+%struct-type
+
+struct kconf_id;
+
+%%
+mainmenu, T_MAINMENU, TF_COMMAND
+menu, T_MENU, TF_COMMAND
+endmenu, T_ENDMENU, TF_COMMAND
+source, T_SOURCE, TF_COMMAND
+choice, T_CHOICE, TF_COMMAND
+endchoice, T_ENDCHOICE, TF_COMMAND
+comment, T_COMMENT, TF_COMMAND
+config, T_CONFIG, TF_COMMAND
+menuconfig, T_MENUCONFIG, TF_COMMAND
+help, T_HELP, TF_COMMAND
+if, T_IF, TF_COMMAND|TF_PARAM
+endif, T_ENDIF, TF_COMMAND
+depends, T_DEPENDS, TF_COMMAND
+requires, T_REQUIRES, TF_COMMAND
+optional, T_OPTIONAL, TF_COMMAND
+default, T_DEFAULT, TF_COMMAND, S_UNKNOWN
+prompt, T_PROMPT, TF_COMMAND
+tristate, T_TYPE, TF_COMMAND, S_TRISTATE
+def_tristate, T_DEFAULT, TF_COMMAND, S_TRISTATE
+bool, T_TYPE, TF_COMMAND, S_BOOLEAN
+boolean, T_TYPE, TF_COMMAND, S_BOOLEAN
+def_bool, T_DEFAULT, TF_COMMAND, S_BOOLEAN
+def_boolean, T_DEFAULT, TF_COMMAND, S_BOOLEAN
+int, T_TYPE, TF_COMMAND, S_INT
+hex, T_TYPE, TF_COMMAND, S_HEX
+string, T_TYPE, TF_COMMAND, S_STRING
+select, T_SELECT, TF_COMMAND
+enable, T_SELECT, TF_COMMAND
+range, T_RANGE, TF_COMMAND
+on, T_ON, TF_PARAM
+%%
--- /dev/null
+/* ANSI-C code produced by gperf version 3.0.1 */
+/* Command-line: gperf */
+/* Computed positions: -k'1,3' */
+
+#if !((' ' == 32) && ('!' == 33) && ('"' == 34) && ('#' == 35) \
+ && ('%' == 37) && ('&' == 38) && ('\'' == 39) && ('(' == 40) \
+ && (')' == 41) && ('*' == 42) && ('+' == 43) && (',' == 44) \
+ && ('-' == 45) && ('.' == 46) && ('/' == 47) && ('0' == 48) \
+ && ('1' == 49) && ('2' == 50) && ('3' == 51) && ('4' == 52) \
+ && ('5' == 53) && ('6' == 54) && ('7' == 55) && ('8' == 56) \
+ && ('9' == 57) && (':' == 58) && (';' == 59) && ('<' == 60) \
+ && ('=' == 61) && ('>' == 62) && ('?' == 63) && ('A' == 65) \
+ && ('B' == 66) && ('C' == 67) && ('D' == 68) && ('E' == 69) \
+ && ('F' == 70) && ('G' == 71) && ('H' == 72) && ('I' == 73) \
+ && ('J' == 74) && ('K' == 75) && ('L' == 76) && ('M' == 77) \
+ && ('N' == 78) && ('O' == 79) && ('P' == 80) && ('Q' == 81) \
+ && ('R' == 82) && ('S' == 83) && ('T' == 84) && ('U' == 85) \
+ && ('V' == 86) && ('W' == 87) && ('X' == 88) && ('Y' == 89) \
+ && ('Z' == 90) && ('[' == 91) && ('\\' == 92) && (']' == 93) \
+ && ('^' == 94) && ('_' == 95) && ('a' == 97) && ('b' == 98) \
+ && ('c' == 99) && ('d' == 100) && ('e' == 101) && ('f' == 102) \
+ && ('g' == 103) && ('h' == 104) && ('i' == 105) && ('j' == 106) \
+ && ('k' == 107) && ('l' == 108) && ('m' == 109) && ('n' == 110) \
+ && ('o' == 111) && ('p' == 112) && ('q' == 113) && ('r' == 114) \
+ && ('s' == 115) && ('t' == 116) && ('u' == 117) && ('v' == 118) \
+ && ('w' == 119) && ('x' == 120) && ('y' == 121) && ('z' == 122) \
+ && ('{' == 123) && ('|' == 124) && ('}' == 125) && ('~' == 126))
+/* The character set is not based on ISO-646. */
+#error "gperf generated tables don't work with this execution character set. Please report a bug to <bug-gnu-gperf@gnu.org>."
+#endif
+
+struct kconf_id;
+/* maximum key range = 45, duplicates = 0 */
+
+#ifdef __GNUC__
+__inline
+#else
+#ifdef __cplusplus
+inline
+#endif
+#endif
+static unsigned int
+kconf_id_hash (register const char *str, register unsigned int len)
+{
+ static unsigned char asso_values[] =
+ {
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 25, 10, 15,
+ 0, 0, 5, 47, 0, 0, 47, 47, 0, 10,
+ 0, 20, 20, 20, 5, 0, 0, 20, 47, 47,
+ 20, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47
+ };
+ register int hval = len;
+
+ switch (hval)
+ {
+ default:
+ hval += asso_values[(unsigned char)str[2]];
+ /*FALLTHROUGH*/
+ case 2:
+ case 1:
+ hval += asso_values[(unsigned char)str[0]];
+ break;
+ }
+ return hval;
+}
+
+struct kconf_id_strings_t
+ {
+ char kconf_id_strings_str2[sizeof("if")];
+ char kconf_id_strings_str3[sizeof("int")];
+ char kconf_id_strings_str4[sizeof("help")];
+ char kconf_id_strings_str5[sizeof("endif")];
+ char kconf_id_strings_str6[sizeof("select")];
+ char kconf_id_strings_str7[sizeof("endmenu")];
+ char kconf_id_strings_str8[sizeof("tristate")];
+ char kconf_id_strings_str9[sizeof("endchoice")];
+ char kconf_id_strings_str10[sizeof("range")];
+ char kconf_id_strings_str11[sizeof("string")];
+ char kconf_id_strings_str12[sizeof("default")];
+ char kconf_id_strings_str13[sizeof("def_bool")];
+ char kconf_id_strings_str14[sizeof("menu")];
+ char kconf_id_strings_str16[sizeof("def_boolean")];
+ char kconf_id_strings_str17[sizeof("def_tristate")];
+ char kconf_id_strings_str18[sizeof("mainmenu")];
+ char kconf_id_strings_str20[sizeof("menuconfig")];
+ char kconf_id_strings_str21[sizeof("config")];
+ char kconf_id_strings_str22[sizeof("on")];
+ char kconf_id_strings_str23[sizeof("hex")];
+ char kconf_id_strings_str26[sizeof("source")];
+ char kconf_id_strings_str27[sizeof("depends")];
+ char kconf_id_strings_str28[sizeof("optional")];
+ char kconf_id_strings_str31[sizeof("enable")];
+ char kconf_id_strings_str32[sizeof("comment")];
+ char kconf_id_strings_str33[sizeof("requires")];
+ char kconf_id_strings_str34[sizeof("bool")];
+ char kconf_id_strings_str37[sizeof("boolean")];
+ char kconf_id_strings_str41[sizeof("choice")];
+ char kconf_id_strings_str46[sizeof("prompt")];
+ };
+static struct kconf_id_strings_t kconf_id_strings_contents =
+ {
+ "if",
+ "int",
+ "help",
+ "endif",
+ "select",
+ "endmenu",
+ "tristate",
+ "endchoice",
+ "range",
+ "string",
+ "default",
+ "def_bool",
+ "menu",
+ "def_boolean",
+ "def_tristate",
+ "mainmenu",
+ "menuconfig",
+ "config",
+ "on",
+ "hex",
+ "source",
+ "depends",
+ "optional",
+ "enable",
+ "comment",
+ "requires",
+ "bool",
+ "boolean",
+ "choice",
+ "prompt"
+ };
+#define kconf_id_strings ((const char *) &kconf_id_strings_contents)
+#ifdef __GNUC__
+__inline
+#endif
+struct kconf_id *
+kconf_id_lookup (register const char *str, register unsigned int len)
+{
+ enum
+ {
+ TOTAL_KEYWORDS = 30,
+ MIN_WORD_LENGTH = 2,
+ MAX_WORD_LENGTH = 12,
+ MIN_HASH_VALUE = 2,
+ MAX_HASH_VALUE = 46
+ };
+
+ static struct kconf_id wordlist[] =
+ {
+ {-1}, {-1},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str2, T_IF, TF_COMMAND|TF_PARAM},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str3, T_TYPE, TF_COMMAND, S_INT},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str4, T_HELP, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str5, T_ENDIF, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str6, T_SELECT, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str7, T_ENDMENU, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str8, T_TYPE, TF_COMMAND, S_TRISTATE},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str9, T_ENDCHOICE, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str10, T_RANGE, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str11, T_TYPE, TF_COMMAND, S_STRING},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str12, T_DEFAULT, TF_COMMAND, S_UNKNOWN},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str13, T_DEFAULT, TF_COMMAND, S_BOOLEAN},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str14, T_MENU, TF_COMMAND},
+ {-1},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str16, T_DEFAULT, TF_COMMAND, S_BOOLEAN},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str17, T_DEFAULT, TF_COMMAND, S_TRISTATE},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str18, T_MAINMENU, TF_COMMAND},
+ {-1},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str20, T_MENUCONFIG, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str21, T_CONFIG, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str22, T_ON, TF_PARAM},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str23, T_TYPE, TF_COMMAND, S_HEX},
+ {-1}, {-1},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str26, T_SOURCE, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str27, T_DEPENDS, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str28, T_OPTIONAL, TF_COMMAND},
+ {-1}, {-1},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str31, T_SELECT, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str32, T_COMMENT, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str33, T_REQUIRES, TF_COMMAND},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str34, T_TYPE, TF_COMMAND, S_BOOLEAN},
+ {-1}, {-1},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str37, T_TYPE, TF_COMMAND, S_BOOLEAN},
+ {-1}, {-1}, {-1},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str41, T_CHOICE, TF_COMMAND},
+ {-1}, {-1}, {-1}, {-1},
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str46, T_PROMPT, TF_COMMAND}
+ };
+
+ if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH)
+ {
+ register int key = kconf_id_hash (str, len);
+
+ if (key <= MAX_HASH_VALUE && key >= 0)
+ {
+ register int o = wordlist[key].name;
+ if (o >= 0)
+ {
+ register const char *s = o + kconf_id_strings;
+
+ if (*str == *s && !strncmp (str + 1, s + 1, len - 1) && s[len] == '\0')
+ return &wordlist[key];
+ }
+ }
+ }
+ return 0;
+}
+
--- /dev/null
+%option backup nostdinit noyywrap never-interactive full ecs
+%option 8bit backup nodefault perf-report
+%x COMMAND HELP STRING PARAM
+%{
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#define LKC_DIRECT_LINK
+#include "lkc.h"
+
+#define START_STRSIZE 16
+
+static struct {
+ struct file *file;
+ int lineno;
+} current_pos;
+
+static char *text;
+static int text_size, text_asize;
+
+struct buffer {
+ struct buffer *parent;
+ YY_BUFFER_STATE state;
+};
+
+struct buffer *current_buf;
+
+static int last_ts, first_ts;
+
+static void zconf_endhelp(void);
+static void zconf_endfile(void);
+
+void new_string(void)
+{
+ text = malloc(START_STRSIZE);
+ text_asize = START_STRSIZE;
+ text_size = 0;
+ *text = 0;
+}
+
+void append_string(const char *str, int size)
+{
+ int new_size = text_size + size + 1;
+ if (new_size > text_asize) {
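+ /* round the new allocation up to the next multiple of START_STRSIZE */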
+ new_size += START_STRSIZE - 1;
+ new_size &= -START_STRSIZE;
+ text = realloc(text, new_size);
+ text_asize = new_size;
+ }
+ memcpy(text + text_size, str, size);
+ text_size += size;
+ text[text_size] = 0;
+}
+
+void alloc_string(const char *str, int size)
+{
+ text = malloc(size + 1);
+ memcpy(text, str, size);
+ text[size] = 0;
+}
+%}
+
+ws [ \n\t]
+n [A-Za-z0-9_]
+
+%%
+ int str = 0;
+ int ts, i;
+
+[ \t]*#.*\n |
+[ \t]*\n {
+ current_file->lineno++;
+ return T_EOL;
+}
+[ \t]*#.*
+
+
+[ \t]+ {
+ BEGIN(COMMAND);
+}
+
+. {
+ unput(yytext[0]);
+ BEGIN(COMMAND);
+}
+
+
+<COMMAND>{
+ {n}+ {
+ struct kconf_id *id = kconf_id_lookup(yytext, yyleng);
+ BEGIN(PARAM);
+ current_pos.file = current_file;
+ current_pos.lineno = current_file->lineno;
+ if (id && id->flags & TF_COMMAND) {
+ zconflval.id = id;
+ return id->token;
+ }
+ alloc_string(yytext, yyleng);
+ zconflval.string = text;
+ return T_WORD;
+ }
+ .
+ \n {
+ BEGIN(INITIAL);
+ current_file->lineno++;
+ return T_EOL;
+ }
+}
+
+<PARAM>{
+ "&&" return T_AND;
+ "||" return T_OR;
+ "(" return T_OPEN_PAREN;
+ ")" return T_CLOSE_PAREN;
+ "!" return T_NOT;
+ "=" return T_EQUAL;
+ "!=" return T_UNEQUAL;
+ \"|\' {
+ str = yytext[0];
+ new_string();
+ BEGIN(STRING);
+ }
+ \n BEGIN(INITIAL); current_file->lineno++; return T_EOL;
+ --- /* ignore */
+ ({n}|[-/.])+ {
+ struct kconf_id *id = kconf_id_lookup(yytext, yyleng);
+ if (id && id->flags & TF_PARAM) {
+ zconflval.id = id;
+ return id->token;
+ }
+ alloc_string(yytext, yyleng);
+ zconflval.string = text;
+ return T_WORD;
+ }
+ #.* /* comment */
+ \\\n current_file->lineno++;
+ .
+ <<EOF>> {
+ BEGIN(INITIAL);
+ }
+}
+
+<STRING>{
+ [^'"\\\n]+/\n {
+ append_string(yytext, yyleng);
+ zconflval.string = text;
+ return T_WORD_QUOTE;
+ }
+ [^'"\\\n]+ {
+ append_string(yytext, yyleng);
+ }
+ \\.?/\n {
+ append_string(yytext + 1, yyleng - 1);
+ zconflval.string = text;
+ return T_WORD_QUOTE;
+ }
+ \\.? {
+ append_string(yytext + 1, yyleng - 1);
+ }
+ \'|\" {
+ if (str == yytext[0]) {
+ BEGIN(PARAM);
+ zconflval.string = text;
+ return T_WORD_QUOTE;
+ } else
+ append_string(yytext, 1);
+ }
+ \n {
+ printf("%s:%d:warning: multi-line strings not supported\n", zconf_curname(), zconf_lineno());
+ current_file->lineno++;
+ BEGIN(INITIAL);
+ return T_EOL;
+ }
+ <<EOF>> {
+ BEGIN(INITIAL);
+ }
+}
+
+<HELP>{
+ [ \t]+ {
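+ /* measure the help text indentation in columns, expanding tabs to 8-column stops */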
+ ts = 0;
+ for (i = 0; i < yyleng; i++) {
+ if (yytext[i] == '\t')
+ ts = (ts & ~7) + 8;
+ else
+ ts++;
+ }
+ last_ts = ts;
+ if (first_ts) {
+ if (ts < first_ts) {
+ zconf_endhelp();
+ return T_HELPTEXT;
+ }
+ ts -= first_ts;
+ while (ts > 8) {
+ append_string(" ", 8);
+ ts -= 8;
+ }
+ append_string(" ", ts);
+ }
+ }
+ [ \t]*\n/[^ \t\n] {
+ current_file->lineno++;
+ zconf_endhelp();
+ return T_HELPTEXT;
+ }
+ [ \t]*\n {
+ current_file->lineno++;
+ append_string("\n", 1);
+ }
+ [^ \t\n].* {
+ append_string(yytext, yyleng);
+ if (!first_ts)
+ first_ts = last_ts;
+ }
+ <<EOF>> {
+ zconf_endhelp();
+ return T_HELPTEXT;
+ }
+}
+
+<<EOF>> {
+ if (current_file) {
+ zconf_endfile();
+ return T_EOL;
+ }
+ fclose(yyin);
+ yyterminate();
+}
+
+%%
+void zconf_starthelp(void)
+{
+ new_string();
+ last_ts = first_ts = 0;
+ BEGIN(HELP);
+}
+
+static void zconf_endhelp(void)
+{
+ zconflval.string = text;
+ BEGIN(INITIAL);
+}
+
+
+/*
+ * Try to open specified file with following names:
+ * ./name
+ * $(srctree)/name
+ * The latter is used when srctree is separate from objtree
+ * when compiling the kernel.
+ * Return NULL if file is not found.
+ */
+FILE *zconf_fopen(const char *name)
+{
+ char *env, fullname[PATH_MAX+1];
+ FILE *f;
+
+ f = fopen(name, "r");
+ if (!f && name[0] != '/') {
+ env = getenv(SRCTREE);
+ if (env) {
+ sprintf(fullname, "%s/%s", env, name);
+ f = fopen(fullname, "r");
+ }
+ }
+ return f;
+}
+
+void zconf_initscan(const char *name)
+{
+ yyin = zconf_fopen(name);
+ if (!yyin) {
+ printf("can't find file %s\n", name);
+ exit(1);
+ }
+
+ current_buf = malloc(sizeof(*current_buf));
+ memset(current_buf, 0, sizeof(*current_buf));
+
+ current_file = file_lookup(name);
+ current_file->lineno = 1;
+ current_file->flags = FILE_BUSY;
+}
+
+void zconf_nextfile(const char *name)
+{
+ struct file *file = file_lookup(name);
+ struct buffer *buf = malloc(sizeof(*buf));
+ memset(buf, 0, sizeof(*buf));
+
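+ /* save the current flex buffer so zconf_endfile() can resume the including file */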
+ current_buf->state = YY_CURRENT_BUFFER;
+ yyin = zconf_fopen(name);
+ if (!yyin) {
+ printf("%s:%d: can't open file \"%s\"\n", zconf_curname(), zconf_lineno(), name);
+ exit(1);
+ }
+ yy_switch_to_buffer(yy_create_buffer(yyin, YY_BUF_SIZE));
+ buf->parent = current_buf;
+ current_buf = buf;
+
+ if (file->flags & FILE_BUSY) {
+ printf("recursive scan (%s)?\n", name);
+ exit(1);
+ }
+ if (file->flags & FILE_SCANNED) {
+ printf("file %s already scanned?\n", name);
+ exit(1);
+ }
+ file->flags |= FILE_BUSY;
+ file->lineno = 1;
+ file->parent = current_file;
+ current_file = file;
+}
+
+static void zconf_endfile(void)
+{
+ struct buffer *parent;
+
+ current_file->flags |= FILE_SCANNED;
+ current_file->flags &= ~FILE_BUSY;
+ current_file = current_file->parent;
+
+ parent = current_buf->parent;
+ if (parent) {
+ fclose(yyin);
+ yy_delete_buffer(YY_CURRENT_BUFFER);
+ yy_switch_to_buffer(parent->state);
+ }
+ free(current_buf);
+ current_buf = parent;
+}
+
+int zconf_lineno(void)
+{
+ return current_pos.lineno;
+}
+
+char *zconf_curname(void)
+{
+ return current_pos.file ? current_pos.file->name : "<none>";
+}
--- /dev/null
+/* A Bison parser, made by GNU Bison 2.0. */
+
+/* Skeleton parser for Yacc-like parsing with Bison,
+ Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+/* As a special exception, when this file is copied by Bison into a
+ Bison output file, you may use that output file without restriction.
+ This special exception was added by the Free Software Foundation
+ in version 1.24 of Bison. */
+
+/* Written by Richard Stallman by simplifying the original so called
+ ``semantic'' parser. */
+
+/* All symbols defined below should begin with yy or YY, to avoid
+ infringing on user name space. This should be done even for local
+ variables, as they might otherwise be expanded by user macros.
+ There are some unavoidable exceptions within include files to
+ define necessary library symbols; they are noted "INFRINGES ON
+ USER NAME SPACE" below. */
+
+/* Identify Bison output. */
+#define YYBISON 1
+
+/* Skeleton name. */
+#define YYSKELETON_NAME "yacc.c"
+
+/* Pure parsers. */
+#define YYPURE 0
+
+/* Using locations. */
+#define YYLSP_NEEDED 0
+
+/* Substitute the variable and function names. */
+#define yyparse zconfparse
+#define yylex zconflex
+#define yyerror zconferror
+#define yylval zconflval
+#define yychar zconfchar
+#define yydebug zconfdebug
+#define yynerrs zconfnerrs
+
+
+/* Tokens. */
+#ifndef YYTOKENTYPE
+# define YYTOKENTYPE
+ /* Put the tokens into the symbol table, so that GDB and other debuggers
+ know about them. */
+ enum yytokentype {
+ T_MAINMENU = 258,
+ T_MENU = 259,
+ T_ENDMENU = 260,
+ T_SOURCE = 261,
+ T_CHOICE = 262,
+ T_ENDCHOICE = 263,
+ T_COMMENT = 264,
+ T_CONFIG = 265,
+ T_MENUCONFIG = 266,
+ T_HELP = 267,
+ T_HELPTEXT = 268,
+ T_IF = 269,
+ T_ENDIF = 270,
+ T_DEPENDS = 271,
+ T_REQUIRES = 272,
+ T_OPTIONAL = 273,
+ T_PROMPT = 274,
+ T_TYPE = 275,
+ T_DEFAULT = 276,
+ T_SELECT = 277,
+ T_RANGE = 278,
+ T_ON = 279,
+ T_WORD = 280,
+ T_WORD_QUOTE = 281,
+ T_UNEQUAL = 282,
+ T_CLOSE_PAREN = 283,
+ T_OPEN_PAREN = 284,
+ T_EOL = 285,
+ T_OR = 286,
+ T_AND = 287,
+ T_EQUAL = 288,
+ T_NOT = 289
+ };
+#endif
+#define T_MAINMENU 258
+#define T_MENU 259
+#define T_ENDMENU 260
+#define T_SOURCE 261
+#define T_CHOICE 262
+#define T_ENDCHOICE 263
+#define T_COMMENT 264
+#define T_CONFIG 265
+#define T_MENUCONFIG 266
+#define T_HELP 267
+#define T_HELPTEXT 268
+#define T_IF 269
+#define T_ENDIF 270
+#define T_DEPENDS 271
+#define T_REQUIRES 272
+#define T_OPTIONAL 273
+#define T_PROMPT 274
+#define T_TYPE 275
+#define T_DEFAULT 276
+#define T_SELECT 277
+#define T_RANGE 278
+#define T_ON 279
+#define T_WORD 280
+#define T_WORD_QUOTE 281
+#define T_UNEQUAL 282
+#define T_CLOSE_PAREN 283
+#define T_OPEN_PAREN 284
+#define T_EOL 285
+#define T_OR 286
+#define T_AND 287
+#define T_EQUAL 288
+#define T_NOT 289
+
+
+
+
+/* Copy the first part of user declarations. */
+
+
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#include <ctype.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+
+#define LKC_DIRECT_LINK
+#include "lkc.h"
+
+#include "zconf.hash.c"
+
+#define printd(mask, fmt...) if (cdebug & (mask)) printf(fmt)
+
+#define PRINTD 0x0001
+#define DEBUG_PARSE 0x0002
+
+int cdebug = PRINTD;
+
+extern int zconflex(void);
+static void zconfprint(const char *err, ...);
+static void zconf_error(const char *err, ...);
+static void zconferror(const char *err);
+static bool zconf_endtoken(struct kconf_id *id, int starttoken, int endtoken);
+
+struct symbol *symbol_hash[257];
+
+static struct menu *current_menu, *current_entry;
+
+#define YYDEBUG 0
+#if YYDEBUG
+#define YYERROR_VERBOSE
+#endif
+
+
+/* Enabling traces. */
+#ifndef YYDEBUG
+# define YYDEBUG 0
+#endif
+
+/* Enabling verbose error messages. */
+#ifdef YYERROR_VERBOSE
+# undef YYERROR_VERBOSE
+# define YYERROR_VERBOSE 1
+#else
+# define YYERROR_VERBOSE 0
+#endif
+
+#if ! defined (YYSTYPE) && ! defined (YYSTYPE_IS_DECLARED)
+
+typedef union YYSTYPE {
+ char *string;
+ struct file *file;
+ struct symbol *symbol;
+ struct expr *expr;
+ struct menu *menu;
+ struct kconf_id *id;
+} YYSTYPE;
+/* Line 190 of yacc.c. */
+
+# define yystype YYSTYPE /* obsolescent; will be withdrawn */
+# define YYSTYPE_IS_DECLARED 1
+# define YYSTYPE_IS_TRIVIAL 1
+#endif
+
+
+
+/* Copy the second part of user declarations. */
+
+
+/* Line 213 of yacc.c. */
+
+
+#if ! defined (yyoverflow) || YYERROR_VERBOSE
+
+# ifndef YYFREE
+# define YYFREE free
+# endif
+# ifndef YYMALLOC
+# define YYMALLOC malloc
+# endif
+
+/* The parser invokes alloca or malloc; define the necessary symbols. */
+
+# ifdef YYSTACK_USE_ALLOCA
+# if YYSTACK_USE_ALLOCA
+# ifdef __GNUC__
+# define YYSTACK_ALLOC __builtin_alloca
+# else
+# define YYSTACK_ALLOC alloca
+# endif
+# endif
+# endif
+
+# ifdef YYSTACK_ALLOC
+ /* Pacify GCC's `empty if-body' warning. */
+# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)
+# else
+# if defined (__STDC__) || defined (__cplusplus)
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+# define YYSIZE_T size_t
+# endif
+# define YYSTACK_ALLOC YYMALLOC
+# define YYSTACK_FREE YYFREE
+# endif
+#endif /* ! defined (yyoverflow) || YYERROR_VERBOSE */
+
+
+#if (! defined (yyoverflow) \
+ && (! defined (__cplusplus) \
+ || (defined (YYSTYPE_IS_TRIVIAL) && YYSTYPE_IS_TRIVIAL)))
+
+/* A type that is properly aligned for any stack member. */
+union yyalloc
+{
+ short int yyss;
+ YYSTYPE yyvs;
+ };
+
+/* The size of the maximum gap between one aligned stack and the next. */
+# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
+
+/* The size of an array large enough to hold all stacks, each with
+ N elements. */
+# define YYSTACK_BYTES(N) \
+ ((N) * (sizeof (short int) + sizeof (YYSTYPE)) \
+ + YYSTACK_GAP_MAXIMUM)
+
+/* Copy COUNT objects from FROM to TO. The source and destination do
+ not overlap. */
+# ifndef YYCOPY
+# if defined (__GNUC__) && 1 < __GNUC__
+# define YYCOPY(To, From, Count) \
+ __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
+# else
+# define YYCOPY(To, From, Count) \
+ do \
+ { \
+ register YYSIZE_T yyi; \
+ for (yyi = 0; yyi < (Count); yyi++) \
+ (To)[yyi] = (From)[yyi]; \
+ } \
+ while (0)
+# endif
+# endif
+
+/* Relocate STACK from its old location to the new one. The
+ local variables YYSIZE and YYSTACKSIZE give the old and new number of
+ elements in the stack, and YYPTR gives the new location of the
+ stack. Advance YYPTR to a properly aligned location for the next
+ stack. */
+# define YYSTACK_RELOCATE(Stack) \
+ do \
+ { \
+ YYSIZE_T yynewbytes; \
+ YYCOPY (&yyptr->Stack, Stack, yysize); \
+ Stack = &yyptr->Stack; \
+ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
+ yyptr += yynewbytes / sizeof (*yyptr); \
+ } \
+ while (0)
+
+#endif
+
+#if defined (__STDC__) || defined (__cplusplus)
+ typedef signed char yysigned_char;
+#else
+ typedef short int yysigned_char;
+#endif
+
+/* YYFINAL -- State number of the termination state. */
+#define YYFINAL 3
+/* YYLAST -- Last index in YYTABLE. */
+#define YYLAST 264
+
+/* YYNTOKENS -- Number of terminals. */
+#define YYNTOKENS 35
+/* YYNNTS -- Number of nonterminals. */
+#define YYNNTS 42
+/* YYNRULES -- Number of rules. */
+#define YYNRULES 104
+/* YYNRULES -- Number of states. */
+#define YYNSTATES 175
+
+/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */
+#define YYUNDEFTOK 2
+#define YYMAXUTOK 289
+
+#define YYTRANSLATE(YYX) \
+ ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
+
+/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */
+static const unsigned char yytranslate[] =
+{
+ 0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34
+};
+
+#if YYDEBUG
+/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
+ YYRHS. */
+static const unsigned short int yyprhs[] =
+{
+ 0, 0, 3, 5, 6, 9, 12, 15, 20, 23,
+ 28, 33, 37, 39, 41, 43, 45, 47, 49, 51,
+ 53, 55, 57, 59, 61, 63, 67, 70, 74, 77,
+ 81, 84, 85, 88, 91, 94, 97, 100, 104, 109,
+ 114, 119, 125, 128, 131, 133, 137, 138, 141, 144,
+ 147, 150, 153, 158, 162, 165, 170, 171, 174, 178,
+ 180, 184, 185, 188, 191, 194, 198, 201, 203, 207,
+ 208, 211, 214, 217, 221, 225, 228, 231, 234, 235,
+ 238, 241, 244, 249, 253, 257, 258, 261, 263, 265,
+ 268, 271, 274, 276, 279, 280, 283, 285, 289, 293,
+ 297, 300, 304, 308, 310
+};
+
+/* YYRHS -- A `-1'-separated list of the rules' RHS. */
+static const yysigned_char yyrhs[] =
+{
+ 36, 0, -1, 37, -1, -1, 37, 39, -1, 37,
+ 50, -1, 37, 61, -1, 37, 3, 71, 73, -1,
+ 37, 72, -1, 37, 25, 1, 30, -1, 37, 38,
+ 1, 30, -1, 37, 1, 30, -1, 16, -1, 19,
+ -1, 20, -1, 22, -1, 18, -1, 23, -1, 21,
+ -1, 30, -1, 56, -1, 65, -1, 42, -1, 44,
+ -1, 63, -1, 25, 1, 30, -1, 1, 30, -1,
+ 10, 25, 30, -1, 41, 45, -1, 11, 25, 30,
+ -1, 43, 45, -1, -1, 45, 46, -1, 45, 69,
+ -1, 45, 67, -1, 45, 40, -1, 45, 30, -1,
+ 20, 70, 30, -1, 19, 71, 74, 30, -1, 21,
+ 75, 74, 30, -1, 22, 25, 74, 30, -1, 23,
+ 76, 76, 74, 30, -1, 7, 30, -1, 47, 51,
+ -1, 72, -1, 48, 53, 49, -1, -1, 51, 52,
+ -1, 51, 69, -1, 51, 67, -1, 51, 30, -1,
+ 51, 40, -1, 19, 71, 74, 30, -1, 20, 70,
+ 30, -1, 18, 30, -1, 21, 25, 74, 30, -1,
+ -1, 53, 39, -1, 14, 75, 73, -1, 72, -1,
+ 54, 57, 55, -1, -1, 57, 39, -1, 57, 61,
+ -1, 57, 50, -1, 4, 71, 30, -1, 58, 68,
+ -1, 72, -1, 59, 62, 60, -1, -1, 62, 39,
+ -1, 62, 61, -1, 62, 50, -1, 6, 71, 30,
+ -1, 9, 71, 30, -1, 64, 68, -1, 12, 30,
+ -1, 66, 13, -1, -1, 68, 69, -1, 68, 30,
+ -1, 68, 40, -1, 16, 24, 75, 30, -1, 16,
+ 75, 30, -1, 17, 75, 30, -1, -1, 71, 74,
+ -1, 25, -1, 26, -1, 5, 30, -1, 8, 30,
+ -1, 15, 30, -1, 30, -1, 73, 30, -1, -1,
+ 14, 75, -1, 76, -1, 76, 33, 76, -1, 76,
+ 27, 76, -1, 29, 75, 28, -1, 34, 75, -1,
+ 75, 31, 75, -1, 75, 32, 75, -1, 25, -1,
+ 26, -1
+};
+
+/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
+static const unsigned short int yyrline[] =
+{
+ 0, 103, 103, 105, 107, 108, 109, 110, 111, 112,
+ 113, 117, 121, 121, 121, 121, 121, 121, 121, 125,
+ 126, 127, 128, 129, 130, 134, 135, 141, 149, 155,
+ 163, 173, 175, 176, 177, 178, 179, 182, 190, 196,
+ 206, 212, 220, 229, 234, 242, 245, 247, 248, 249,
+ 250, 251, 254, 260, 271, 277, 287, 289, 294, 302,
+ 310, 313, 315, 316, 317, 322, 329, 334, 342, 345,
+ 347, 348, 349, 352, 360, 367, 374, 380, 387, 389,
+ 390, 391, 394, 399, 404, 412, 414, 419, 420, 423,
+ 424, 425, 429, 430, 433, 434, 437, 438, 439, 440,
+ 441, 442, 443, 446, 447
+};
+#endif
+
+#if YYDEBUG || YYERROR_VERBOSE
+/* YYTNME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
+ First, the terminals, then, starting at YYNTOKENS, nonterminals. */
+static const char *const yytname[] =
+{
+ "$end", "error", "$undefined", "T_MAINMENU", "T_MENU", "T_ENDMENU",
+ "T_SOURCE", "T_CHOICE", "T_ENDCHOICE", "T_COMMENT", "T_CONFIG",
+ "T_MENUCONFIG", "T_HELP", "T_HELPTEXT", "T_IF", "T_ENDIF", "T_DEPENDS",
+ "T_REQUIRES", "T_OPTIONAL", "T_PROMPT", "T_TYPE", "T_DEFAULT",
+ "T_SELECT", "T_RANGE", "T_ON", "T_WORD", "T_WORD_QUOTE", "T_UNEQUAL",
+ "T_CLOSE_PAREN", "T_OPEN_PAREN", "T_EOL", "T_OR", "T_AND", "T_EQUAL",
+ "T_NOT", "$accept", "input", "stmt_list", "option_name", "common_stmt",
+ "option_error", "config_entry_start", "config_stmt",
+ "menuconfig_entry_start", "menuconfig_stmt", "config_option_list",
+ "config_option", "choice", "choice_entry", "choice_end", "choice_stmt",
+ "choice_option_list", "choice_option", "choice_block", "if_entry",
+ "if_end", "if_stmt", "if_block", "menu", "menu_entry", "menu_end",
+ "menu_stmt", "menu_block", "source_stmt", "comment", "comment_stmt",
+ "help_start", "help", "depends_list", "depends", "prompt_stmt_opt",
+ "prompt", "end", "nl", "if_expr", "expr", "symbol", 0
+};
+#endif
+
+# ifdef YYPRINT
+/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to
+ token YYLEX-NUM. */
+static const unsigned short int yytoknum[] =
+{
+ 0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289
+};
+# endif
+
+/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
+static const unsigned char yyr1[] =
+{
+ 0, 35, 36, 37, 37, 37, 37, 37, 37, 37,
+ 37, 37, 38, 38, 38, 38, 38, 38, 38, 39,
+ 39, 39, 39, 39, 39, 40, 40, 41, 42, 43,
+ 44, 45, 45, 45, 45, 45, 45, 46, 46, 46,
+ 46, 46, 47, 48, 49, 50, 51, 51, 51, 51,
+ 51, 51, 52, 52, 52, 52, 53, 53, 54, 55,
+ 56, 57, 57, 57, 57, 58, 59, 60, 61, 62,
+ 62, 62, 62, 63, 64, 65, 66, 67, 68, 68,
+ 68, 68, 69, 69, 69, 70, 70, 71, 71, 72,
+ 72, 72, 73, 73, 74, 74, 75, 75, 75, 75,
+ 75, 75, 75, 76, 76
+};
+
+/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */
+static const unsigned char yyr2[] =
+{
+ 0, 2, 1, 0, 2, 2, 2, 4, 2, 4,
+ 4, 3, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 3, 2, 3, 2, 3,
+ 2, 0, 2, 2, 2, 2, 2, 3, 4, 4,
+ 4, 5, 2, 2, 1, 3, 0, 2, 2, 2,
+ 2, 2, 4, 3, 2, 4, 0, 2, 3, 1,
+ 3, 0, 2, 2, 2, 3, 2, 1, 3, 0,
+ 2, 2, 2, 3, 3, 2, 2, 2, 0, 2,
+ 2, 2, 4, 3, 3, 0, 2, 1, 1, 2,
+ 2, 2, 1, 2, 0, 2, 1, 3, 3, 3,
+ 2, 3, 3, 1, 1
+};
+
+/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
+ STATE-NUM when YYTABLE doesn't specify something else to do. Zero
+ means the default is an error. */
+static const unsigned char yydefact[] =
+{
+ 3, 0, 0, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 12, 16, 13, 14,
+ 18, 15, 17, 0, 19, 0, 4, 31, 22, 31,
+ 23, 46, 56, 5, 61, 20, 78, 69, 6, 24,
+ 78, 21, 8, 11, 87, 88, 0, 0, 89, 0,
+ 42, 90, 0, 0, 0, 103, 104, 0, 0, 0,
+ 96, 91, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 92, 7, 65, 73, 74, 27, 29, 0,
+ 100, 0, 0, 58, 0, 0, 9, 10, 0, 0,
+ 0, 0, 0, 85, 0, 0, 0, 0, 36, 35,
+ 32, 0, 34, 33, 0, 0, 85, 0, 50, 51,
+ 47, 49, 48, 57, 45, 44, 62, 64, 60, 63,
+ 59, 80, 81, 79, 70, 72, 68, 71, 67, 93,
+ 99, 101, 102, 98, 97, 26, 76, 0, 0, 0,
+ 94, 0, 94, 94, 94, 0, 0, 77, 54, 94,
+ 0, 94, 0, 83, 84, 0, 0, 37, 86, 0,
+ 0, 94, 25, 0, 53, 0, 82, 95, 38, 39,
+ 40, 0, 52, 55, 41
+};
+
+/* YYDEFGOTO[NTERM-NUM]. */
+static const short int yydefgoto[] =
+{
+ -1, 1, 2, 25, 26, 99, 27, 28, 29, 30,
+ 64, 100, 31, 32, 114, 33, 66, 110, 67, 34,
+ 118, 35, 68, 36, 37, 126, 38, 70, 39, 40,
+ 41, 101, 102, 69, 103, 141, 142, 42, 73, 156,
+ 59, 60
+};
+
+/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
+ STATE-NUM. */
+#define YYPACT_NINF -78
+static const short int yypact[] =
+{
+ -78, 2, 159, -78, -21, 0, 0, -12, 0, 1,
+ 4, 0, 27, 38, 60, 58, -78, -78, -78, -78,
+ -78, -78, -78, 100, -78, 104, -78, -78, -78, -78,
+ -78, -78, -78, -78, -78, -78, -78, -78, -78, -78,
+ -78, -78, -78, -78, -78, -78, 86, 113, -78, 114,
+ -78, -78, 125, 127, 128, -78, -78, 60, 60, 210,
+ 65, -78, 141, 142, 39, 103, 182, 200, 6, 66,
+ 6, 131, -78, 146, -78, -78, -78, -78, -78, 196,
+ -78, 60, 60, 146, 40, 40, -78, -78, 155, 156,
+ -2, 60, 0, 0, 60, 105, 40, 194, -78, -78,
+ -78, 206, -78, -78, 183, 0, 0, 195, -78, -78,
+ -78, -78, -78, -78, -78, -78, -78, -78, -78, -78,
+ -78, -78, -78, -78, -78, -78, -78, -78, -78, -78,
+ -78, 197, -78, -78, -78, -78, -78, 60, 213, 216,
+ 212, 203, 212, 190, 212, 40, 208, -78, -78, 212,
+ 222, 212, 219, -78, -78, 60, 223, -78, -78, 224,
+ 225, 212, -78, 226, -78, 227, -78, 47, -78, -78,
+ -78, 228, -78, -78, -78
+};
+
+/* YYPGOTO[NTERM-NUM]. */
+static const short int yypgoto[] =
+{
+ -78, -78, -78, -78, 164, -36, -78, -78, -78, -78,
+ 230, -78, -78, -78, -78, 29, -78, -78, -78, -78,
+ -78, -78, -78, -78, -78, -78, 59, -78, -78, -78,
+ -78, -78, 198, 220, 24, 157, -5, 169, 202, 74,
+ -53, -77
+};
+
+/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
+ positive, shift that token. If negative, reduce the rule which
+ number is the opposite. If zero, do what YYDEFACT says.
+ If YYTABLE_NINF, syntax error. */
+#define YYTABLE_NINF -76
+static const short int yytable[] =
+{
+ 46, 47, 3, 49, 79, 80, 52, 133, 134, 43,
+ 6, 7, 8, 9, 10, 11, 12, 13, 48, 145,
+ 14, 15, 137, 55, 56, 44, 45, 57, 131, 132,
+ 109, 50, 58, 122, 51, 122, 24, 138, 139, -28,
+ 88, 143, -28, -28, -28, -28, -28, -28, -28, -28,
+ -28, 89, 53, -28, -28, 90, 91, -28, 92, 93,
+ 94, 95, 96, 54, 97, 55, 56, 88, 161, 98,
+ -66, -66, -66, -66, -66, -66, -66, -66, 81, 82,
+ -66, -66, 90, 91, 152, 55, 56, 140, 61, 57,
+ 112, 97, 84, 123, 58, 123, 121, 117, 85, 125,
+ 149, 62, 167, -30, 88, 63, -30, -30, -30, -30,
+ -30, -30, -30, -30, -30, 89, 72, -30, -30, 90,
+ 91, -30, 92, 93, 94, 95, 96, 119, 97, 127,
+ 144, -75, 88, 98, -75, -75, -75, -75, -75, -75,
+ -75, -75, -75, 74, 75, -75, -75, 90, 91, -75,
+ -75, -75, -75, -75, -75, 76, 97, 77, 78, -2,
+ 4, 121, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 86, 87, 14, 15, 16, 129, 17, 18, 19,
+ 20, 21, 22, 88, 23, 135, 136, -43, -43, 24,
+ -43, -43, -43, -43, 89, 146, -43, -43, 90, 91,
+ 104, 105, 106, 107, 155, 7, 8, 97, 10, 11,
+ 12, 13, 108, 148, 14, 15, 158, 159, 160, 147,
+ 151, 81, 82, 163, 130, 165, 155, 81, 82, 82,
+ 24, 113, 116, 157, 124, 171, 115, 120, 162, 128,
+ 72, 81, 82, 153, 81, 82, 154, 81, 82, 166,
+ 81, 82, 164, 168, 169, 170, 172, 173, 174, 65,
+ 71, 83, 0, 150, 111
+};
+
+static const short int yycheck[] =
+{
+ 5, 6, 0, 8, 57, 58, 11, 84, 85, 30,
+ 4, 5, 6, 7, 8, 9, 10, 11, 30, 96,
+ 14, 15, 24, 25, 26, 25, 26, 29, 81, 82,
+ 66, 30, 34, 69, 30, 71, 30, 90, 91, 0,
+ 1, 94, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 25, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 25, 25, 25, 26, 1, 145, 30,
+ 4, 5, 6, 7, 8, 9, 10, 11, 31, 32,
+ 14, 15, 16, 17, 137, 25, 26, 92, 30, 29,
+ 66, 25, 27, 69, 34, 71, 30, 68, 33, 70,
+ 105, 1, 155, 0, 1, 1, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 30, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 68, 25, 70,
+ 25, 0, 1, 30, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 30, 30, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 30, 25, 30, 30, 0,
+ 1, 30, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 30, 30, 14, 15, 16, 30, 18, 19, 20,
+ 21, 22, 23, 1, 25, 30, 30, 5, 6, 30,
+ 8, 9, 10, 11, 12, 1, 14, 15, 16, 17,
+ 18, 19, 20, 21, 14, 5, 6, 25, 8, 9,
+ 10, 11, 30, 30, 14, 15, 142, 143, 144, 13,
+ 25, 31, 32, 149, 28, 151, 14, 31, 32, 32,
+ 30, 67, 68, 30, 70, 161, 67, 68, 30, 70,
+ 30, 31, 32, 30, 31, 32, 30, 31, 32, 30,
+ 31, 32, 30, 30, 30, 30, 30, 30, 30, 29,
+ 40, 59, -1, 106, 66
+};
+
+/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
+ symbol of state STATE-NUM. */
+static const unsigned char yystos[] =
+{
+ 0, 36, 37, 0, 1, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 14, 15, 16, 18, 19, 20,
+ 21, 22, 23, 25, 30, 38, 39, 41, 42, 43,
+ 44, 47, 48, 50, 54, 56, 58, 59, 61, 63,
+ 64, 65, 72, 30, 25, 26, 71, 71, 30, 71,
+ 30, 30, 71, 25, 25, 25, 26, 29, 34, 75,
+ 76, 30, 1, 1, 45, 45, 51, 53, 57, 68,
+ 62, 68, 30, 73, 30, 30, 30, 30, 30, 75,
+ 75, 31, 32, 73, 27, 33, 30, 30, 1, 12,
+ 16, 17, 19, 20, 21, 22, 23, 25, 30, 40,
+ 46, 66, 67, 69, 18, 19, 20, 21, 30, 40,
+ 52, 67, 69, 39, 49, 72, 39, 50, 55, 61,
+ 72, 30, 40, 69, 39, 50, 60, 61, 72, 30,
+ 28, 75, 75, 76, 76, 30, 30, 24, 75, 75,
+ 71, 70, 71, 75, 25, 76, 1, 13, 30, 71,
+ 70, 25, 75, 30, 30, 14, 74, 30, 74, 74,
+ 74, 76, 30, 74, 30, 74, 30, 75, 30, 30,
+ 30, 74, 30, 30, 30
+};
+
+#if ! defined (YYSIZE_T) && defined (__SIZE_TYPE__)
+# define YYSIZE_T __SIZE_TYPE__
+#endif
+#if ! defined (YYSIZE_T) && defined (size_t)
+# define YYSIZE_T size_t
+#endif
+#if ! defined (YYSIZE_T)
+# if defined (__STDC__) || defined (__cplusplus)
+# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
+# define YYSIZE_T size_t
+# endif
+#endif
+#if ! defined (YYSIZE_T)
+# define YYSIZE_T unsigned int
+#endif
+
+#define yyerrok (yyerrstatus = 0)
+#define yyclearin (yychar = YYEMPTY)
+#define YYEMPTY (-2)
+#define YYEOF 0
+
+#define YYACCEPT goto yyacceptlab
+#define YYABORT goto yyabortlab
+#define YYERROR goto yyerrorlab
+
+
+/* Like YYERROR except do call yyerror. This remains here temporarily
+ to ease the transition to the new meaning of YYERROR, for GCC.
+ Once GCC version 2 has supplanted version 1, this can go. */
+
+#define YYFAIL goto yyerrlab
+
+#define YYRECOVERING() (!!yyerrstatus)
+
+#define YYBACKUP(Token, Value) \
+do \
+ if (yychar == YYEMPTY && yylen == 1) \
+ { \
+ yychar = (Token); \
+ yylval = (Value); \
+ yytoken = YYTRANSLATE (yychar); \
+ YYPOPSTACK; \
+ goto yybackup; \
+ } \
+ else \
+ { \
+ yyerror ("syntax error: cannot back up");\
+ YYERROR; \
+ } \
+while (0)
+
+
+#define YYTERROR 1
+#define YYERRCODE 256
+
+
+/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
+ If N is 0, then set CURRENT to the empty location which ends
+ the previous symbol: RHS[0] (always defined). */
+
+#define YYRHSLOC(Rhs, K) ((Rhs)[K])
+#ifndef YYLLOC_DEFAULT
+# define YYLLOC_DEFAULT(Current, Rhs, N) \
+ do \
+ if (N) \
+ { \
+ (Current).first_line = YYRHSLOC (Rhs, 1).first_line; \
+ (Current).first_column = YYRHSLOC (Rhs, 1).first_column; \
+ (Current).last_line = YYRHSLOC (Rhs, N).last_line; \
+ (Current).last_column = YYRHSLOC (Rhs, N).last_column; \
+ } \
+ else \
+ { \
+ (Current).first_line = (Current).last_line = \
+ YYRHSLOC (Rhs, 0).last_line; \
+ (Current).first_column = (Current).last_column = \
+ YYRHSLOC (Rhs, 0).last_column; \
+ } \
+ while (0)
+#endif
+
+
+/* YY_LOCATION_PRINT -- Print the location on the stream.
+ This macro was not mandated originally: define only if we know
+ we won't break user code: when these are the locations we know. */
+
+#ifndef YY_LOCATION_PRINT
+# if YYLTYPE_IS_TRIVIAL
+# define YY_LOCATION_PRINT(File, Loc) \
+ fprintf (File, "%d.%d-%d.%d", \
+ (Loc).first_line, (Loc).first_column, \
+ (Loc).last_line, (Loc).last_column)
+# else
+# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
+# endif
+#endif
+
+
+/* YYLEX -- calling `yylex' with the right arguments. */
+
+#ifdef YYLEX_PARAM
+# define YYLEX yylex (YYLEX_PARAM)
+#else
+# define YYLEX yylex ()
+#endif
+
+/* Enable debugging if requested. */
+#if YYDEBUG
+
+# ifndef YYFPRINTF
+# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
+# define YYFPRINTF fprintf
+# endif
+
+# define YYDPRINTF(Args) \
+do { \
+ if (yydebug) \
+ YYFPRINTF Args; \
+} while (0)
+
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
+do { \
+ if (yydebug) \
+ { \
+ YYFPRINTF (stderr, "%s ", Title); \
+ yysymprint (stderr, \
+ Type, Value); \
+ YYFPRINTF (stderr, "\n"); \
+ } \
+} while (0)
+
+/*------------------------------------------------------------------.
+| yy_stack_print -- Print the state stack from its BOTTOM up to its |
+| TOP (included). |
+`------------------------------------------------------------------*/
+
+#if defined (__STDC__) || defined (__cplusplus)
+static void
+yy_stack_print (short int *bottom, short int *top)
+#else
+static void
+yy_stack_print (bottom, top)
+ short int *bottom;
+ short int *top;
+#endif
+{
+ YYFPRINTF (stderr, "Stack now");
+ for (/* Nothing. */; bottom <= top; ++bottom)
+ YYFPRINTF (stderr, " %d", *bottom);
+ YYFPRINTF (stderr, "\n");
+}
+
+# define YY_STACK_PRINT(Bottom, Top) \
+do { \
+ if (yydebug) \
+ yy_stack_print ((Bottom), (Top)); \
+} while (0)
+
+
+/*------------------------------------------------.
+| Report that the YYRULE is going to be reduced. |
+`------------------------------------------------*/
+
+#if defined (__STDC__) || defined (__cplusplus)
+static void
+yy_reduce_print (int yyrule)
+#else
+static void
+yy_reduce_print (yyrule)
+ int yyrule;
+#endif
+{
+ int yyi;
+ unsigned int yylno = yyrline[yyrule];
+ YYFPRINTF (stderr, "Reducing stack by rule %d (line %u), ",
+ yyrule - 1, yylno);
+ /* Print the symbols being reduced, and their result. */
+ for (yyi = yyprhs[yyrule]; 0 <= yyrhs[yyi]; yyi++)
+ YYFPRINTF (stderr, "%s ", yytname [yyrhs[yyi]]);
+ YYFPRINTF (stderr, "-> %s\n", yytname [yyr1[yyrule]]);
+}
+
+# define YY_REDUCE_PRINT(Rule) \
+do { \
+ if (yydebug) \
+ yy_reduce_print (Rule); \
+} while (0)
+
+/* Nonzero means print parse trace. It is left uninitialized so that
+ multiple parsers can coexist. */
+int yydebug;
+#else /* !YYDEBUG */
+# define YYDPRINTF(Args)
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
+# define YY_STACK_PRINT(Bottom, Top)
+# define YY_REDUCE_PRINT(Rule)
+#endif /* !YYDEBUG */
+
+
+/* YYINITDEPTH -- initial size of the parser's stacks. */
+#ifndef YYINITDEPTH
+# define YYINITDEPTH 200
+#endif
+
+/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
+ if the built-in stack extension method is used).
+
+ Do not make this value too large; the results are undefined if
+ SIZE_MAX < YYSTACK_BYTES (YYMAXDEPTH)
+ evaluated with infinite-precision integer arithmetic. */
+
+#ifndef YYMAXDEPTH
+# define YYMAXDEPTH 10000
+#endif
+
+\f
+
+#if YYERROR_VERBOSE
+
+# ifndef yystrlen
+# if defined (__GLIBC__) && defined (_STRING_H)
+# define yystrlen strlen
+# else
+/* Return the length of YYSTR. */
+static YYSIZE_T
+# if defined (__STDC__) || defined (__cplusplus)
+yystrlen (const char *yystr)
+# else
+yystrlen (yystr)
+ const char *yystr;
+# endif
+{
+ register const char *yys = yystr;
+
+ while (*yys++ != '\0')
+ continue;
+
+ return yys - yystr - 1;
+}
+# endif
+# endif
+
+# ifndef yystpcpy
+# if defined (__GLIBC__) && defined (_STRING_H) && defined (_GNU_SOURCE)
+# define yystpcpy stpcpy
+# else
+/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
+ YYDEST. */
+static char *
+# if defined (__STDC__) || defined (__cplusplus)
+yystpcpy (char *yydest, const char *yysrc)
+# else
+yystpcpy (yydest, yysrc)
+ char *yydest;
+ const char *yysrc;
+# endif
+{
+ register char *yyd = yydest;
+ register const char *yys = yysrc;
+
+ while ((*yyd++ = *yys++) != '\0')
+ continue;
+
+ return yyd - 1;
+}
+# endif
+# endif
+
+#endif /* !YYERROR_VERBOSE */
+
+\f
+
+#if YYDEBUG
+/*--------------------------------.
+| Print this symbol on YYOUTPUT. |
+`--------------------------------*/
+
+#if defined (__STDC__) || defined (__cplusplus)
+static void
+yysymprint (FILE *yyoutput, int yytype, YYSTYPE *yyvaluep)
+#else
+static void
+yysymprint (yyoutput, yytype, yyvaluep)
+ FILE *yyoutput;
+ int yytype;
+ YYSTYPE *yyvaluep;
+#endif
+{
+ /* Pacify ``unused variable'' warnings. */
+ (void) yyvaluep;
+
+ if (yytype < YYNTOKENS)
+ YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
+ else
+ YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
+
+
+# ifdef YYPRINT
+ if (yytype < YYNTOKENS)
+ YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
+# endif
+ switch (yytype)
+ {
+ default:
+ break;
+ }
+ YYFPRINTF (yyoutput, ")");
+}
+
+#endif /* ! YYDEBUG */
+/*-----------------------------------------------.
+| Release the memory associated to this symbol. |
+`-----------------------------------------------*/
+
+#if defined (__STDC__) || defined (__cplusplus)
+static void
+yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
+#else
+static void
+yydestruct (yymsg, yytype, yyvaluep)
+ const char *yymsg;
+ int yytype;
+ YYSTYPE *yyvaluep;
+#endif
+{
+ /* Pacify ``unused variable'' warnings. */
+ (void) yyvaluep;
+
+ if (!yymsg)
+ yymsg = "Deleting";
+ YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
+
+ switch (yytype)
+ {
+ case 48: /* choice_entry */
+
+ {
+ fprintf(stderr, "%s:%d: missing end statement for this entry\n",
+ (yyvaluep->menu)->file->name, (yyvaluep->menu)->lineno);
+ if (current_menu == (yyvaluep->menu))
+ menu_end_menu();
+};
+
+ break;
+ case 54: /* if_entry */
+
+ {
+ fprintf(stderr, "%s:%d: missing end statement for this entry\n",
+ (yyvaluep->menu)->file->name, (yyvaluep->menu)->lineno);
+ if (current_menu == (yyvaluep->menu))
+ menu_end_menu();
+};
+
+ break;
+ case 59: /* menu_entry */
+
+ {
+ fprintf(stderr, "%s:%d: missing end statement for this entry\n",
+ (yyvaluep->menu)->file->name, (yyvaluep->menu)->lineno);
+ if (current_menu == (yyvaluep->menu))
+ menu_end_menu();
+};
+
+ break;
+
+ default:
+ break;
+ }
+}
+\f
+
+/* Prevent warnings from -Wmissing-prototypes. */
+
+#ifdef YYPARSE_PARAM
+# if defined (__STDC__) || defined (__cplusplus)
+int yyparse (void *YYPARSE_PARAM);
+# else
+int yyparse ();
+# endif
+#else /* ! YYPARSE_PARAM */
+#if defined (__STDC__) || defined (__cplusplus)
+int yyparse (void);
+#else
+int yyparse ();
+#endif
+#endif /* ! YYPARSE_PARAM */
+
+
+
+/* The look-ahead symbol. */
+int yychar;
+
+/* The semantic value of the look-ahead symbol. */
+YYSTYPE yylval;
+
+/* Number of syntax errors so far. */
+int yynerrs;
+
+
+
+/*----------.
+| yyparse. |
+`----------*/
+
+#ifdef YYPARSE_PARAM
+# if defined (__STDC__) || defined (__cplusplus)
+int yyparse (void *YYPARSE_PARAM)
+# else
+int yyparse (YYPARSE_PARAM)
+ void *YYPARSE_PARAM;
+# endif
+#else /* ! YYPARSE_PARAM */
+#if defined (__STDC__) || defined (__cplusplus)
+int
+yyparse (void)
+#else
+int
+yyparse ()
+
+#endif
+#endif
+{
+
+ register int yystate;
+ register int yyn;
+ int yyresult;
+ /* Number of tokens to shift before error messages enabled. */
+ int yyerrstatus;
+ /* Look-ahead token as an internal (translated) token number. */
+ int yytoken = 0;
+
+ /* Three stacks and their tools:
+ `yyss': related to states,
+ `yyvs': related to semantic values,
+ `yyls': related to locations.
+
+ Refer to the stacks thru separate pointers, to allow yyoverflow
+ to reallocate them elsewhere. */
+
+ /* The state stack. */
+ short int yyssa[YYINITDEPTH];
+ short int *yyss = yyssa;
+ register short int *yyssp;
+
+ /* The semantic value stack. */
+ YYSTYPE yyvsa[YYINITDEPTH];
+ YYSTYPE *yyvs = yyvsa;
+ register YYSTYPE *yyvsp;
+
+
+
+#define YYPOPSTACK (yyvsp--, yyssp--)
+
+ YYSIZE_T yystacksize = YYINITDEPTH;
+
+ /* The variables used to return semantic value and location from the
+ action routines. */
+ YYSTYPE yyval;
+
+
+ /* When reducing, the number of symbols on the RHS of the reduced
+ rule. */
+ int yylen;
+
+ YYDPRINTF ((stderr, "Starting parse\n"));
+
+ yystate = 0;
+ yyerrstatus = 0;
+ yynerrs = 0;
+ yychar = YYEMPTY; /* Cause a token to be read. */
+
+ /* Initialize stack pointers.
+ Waste one element of value and location stack
+ so that they stay on the same level as the state stack.
+ The wasted elements are never initialized. */
+
+ yyssp = yyss;
+ yyvsp = yyvs;
+
+
+ yyvsp[0] = yylval;
+
+ goto yysetstate;
+
+/*------------------------------------------------------------.
+| yynewstate -- Push a new state, which is found in yystate. |
+`------------------------------------------------------------*/
+ yynewstate:
+ /* In all cases, when you get here, the value and location stacks
+ have just been pushed. so pushing a state here evens the stacks.
+ */
+ yyssp++;
+
+ yysetstate:
+ *yyssp = yystate;
+
+ if (yyss + yystacksize - 1 <= yyssp)
+ {
+ /* Get the current used size of the three stacks, in elements. */
+ YYSIZE_T yysize = yyssp - yyss + 1;
+
+#ifdef yyoverflow
+ {
+ /* Give user a chance to reallocate the stack. Use copies of
+ these so that the &'s don't force the real ones into
+ memory. */
+ YYSTYPE *yyvs1 = yyvs;
+ short int *yyss1 = yyss;
+
+
+ /* Each stack pointer address is followed by the size of the
+ data in use in that stack, in bytes. This used to be a
+ conditional around just the two extra args, but that might
+ be undefined if yyoverflow is a macro. */
+ yyoverflow ("parser stack overflow",
+ &yyss1, yysize * sizeof (*yyssp),
+ &yyvs1, yysize * sizeof (*yyvsp),
+
+ &yystacksize);
+
+ yyss = yyss1;
+ yyvs = yyvs1;
+ }
+#else /* no yyoverflow */
+# ifndef YYSTACK_RELOCATE
+ goto yyoverflowlab;
+# else
+ /* Extend the stack our own way. */
+ if (YYMAXDEPTH <= yystacksize)
+ goto yyoverflowlab;
+ yystacksize *= 2;
+ if (YYMAXDEPTH < yystacksize)
+ yystacksize = YYMAXDEPTH;
+
+ {
+ short int *yyss1 = yyss;
+ union yyalloc *yyptr =
+ (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
+ if (! yyptr)
+ goto yyoverflowlab;
+ YYSTACK_RELOCATE (yyss);
+ YYSTACK_RELOCATE (yyvs);
+
+# undef YYSTACK_RELOCATE
+ if (yyss1 != yyssa)
+ YYSTACK_FREE (yyss1);
+ }
+# endif
+#endif /* no yyoverflow */
+
+ yyssp = yyss + yysize - 1;
+ yyvsp = yyvs + yysize - 1;
+
+
+ YYDPRINTF ((stderr, "Stack size increased to %lu\n",
+ (unsigned long int) yystacksize));
+
+ if (yyss + yystacksize - 1 <= yyssp)
+ YYABORT;
+ }
+
+ YYDPRINTF ((stderr, "Entering state %d\n", yystate));
+
+ goto yybackup;
+
+/*-----------.
+| yybackup. |
+`-----------*/
+yybackup:
+
+/* Do appropriate processing given the current state. */
+/* Read a look-ahead token if we need one and don't already have one. */
+/* yyresume: */
+
+ /* First try to decide what to do without reference to look-ahead token. */
+
+ yyn = yypact[yystate];
+ if (yyn == YYPACT_NINF)
+ goto yydefault;
+
+ /* Not known => get a look-ahead token if don't already have one. */
+
+ /* YYCHAR is either YYEMPTY or YYEOF or a valid look-ahead symbol. */
+ if (yychar == YYEMPTY)
+ {
+ YYDPRINTF ((stderr, "Reading a token: "));
+ yychar = YYLEX;
+ }
+
+ if (yychar <= YYEOF)
+ {
+ yychar = yytoken = YYEOF;
+ YYDPRINTF ((stderr, "Now at end of input.\n"));
+ }
+ else
+ {
+ yytoken = YYTRANSLATE (yychar);
+ YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
+ }
+
+ /* If the proper action on seeing token YYTOKEN is to reduce or to
+ detect an error, take that action. */
+ yyn += yytoken;
+ if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
+ goto yydefault;
+ yyn = yytable[yyn];
+ if (yyn <= 0)
+ {
+ if (yyn == 0 || yyn == YYTABLE_NINF)
+ goto yyerrlab;
+ yyn = -yyn;
+ goto yyreduce;
+ }
+
+ if (yyn == YYFINAL)
+ YYACCEPT;
+
+ /* Shift the look-ahead token. */
+ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
+
+ /* Discard the token being shifted unless it is eof. */
+ if (yychar != YYEOF)
+ yychar = YYEMPTY;
+
+ *++yyvsp = yylval;
+
+
+ /* Count tokens shifted since error; after three, turn off error
+ status. */
+ if (yyerrstatus)
+ yyerrstatus--;
+
+ yystate = yyn;
+ goto yynewstate;
+
+
+/*-----------------------------------------------------------.
+| yydefault -- do the default action for the current state. |
+`-----------------------------------------------------------*/
+yydefault:
+ yyn = yydefact[yystate];
+ if (yyn == 0)
+ goto yyerrlab;
+ goto yyreduce;
+
+
+/*-----------------------------.
+| yyreduce -- Do a reduction. |
+`-----------------------------*/
+yyreduce:
+ /* yyn is the number of a rule to reduce with. */
+ yylen = yyr2[yyn];
+
+ /* If YYLEN is nonzero, implement the default value of the action:
+ `$$ = $1'.
+
+ Otherwise, the following line sets YYVAL to garbage.
+ This behavior is undocumented and Bison
+ users should not rely upon it. Assigning to YYVAL
+ unconditionally makes the parser a bit smaller, and it avoids a
+ GCC warning that YYVAL may be used uninitialized. */
+ yyval = yyvsp[1-yylen];
+
+
+ YY_REDUCE_PRINT (yyn);
+ switch (yyn)
+ {
+ case 8:
+
+ { zconf_error("unexpected end statement"); ;}
+ break;
+
+ case 9:
+
+ { zconf_error("unknown statement \"%s\"", (yyvsp[-2].string)); ;}
+ break;
+
+ case 10:
+
+ {
+ zconf_error("unexpected option \"%s\"", kconf_id_strings + (yyvsp[-2].id)->name);
+;}
+ break;
+
+ case 11:
+
+ { zconf_error("invalid statement"); ;}
+ break;
+
+ case 25:
+
+ { zconf_error("unknown option \"%s\"", (yyvsp[-2].string)); ;}
+ break;
+
+ case 26:
+
+ { zconf_error("invalid option"); ;}
+ break;
+
+ case 27:
+
+ {
+ struct symbol *sym = sym_lookup((yyvsp[-1].string), 0);
+ sym->flags |= SYMBOL_OPTIONAL;
+ menu_add_entry(sym);
+ printd(DEBUG_PARSE, "%s:%d:config %s\n", zconf_curname(), zconf_lineno(), (yyvsp[-1].string));
+;}
+ break;
+
+ case 28:
+
+ {
+ menu_end_entry();
+ printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 29:
+
+ {
+ struct symbol *sym = sym_lookup((yyvsp[-1].string), 0);
+ sym->flags |= SYMBOL_OPTIONAL;
+ menu_add_entry(sym);
+ printd(DEBUG_PARSE, "%s:%d:menuconfig %s\n", zconf_curname(), zconf_lineno(), (yyvsp[-1].string));
+;}
+ break;
+
+ case 30:
+
+ {
+ if (current_entry->prompt)
+ current_entry->prompt->type = P_MENU;
+ else
+ zconfprint("warning: menuconfig statement without prompt");
+ menu_end_entry();
+ printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 37:
+
+ {
+ menu_set_type((yyvsp[-2].id)->stype);
+ printd(DEBUG_PARSE, "%s:%d:type(%u)\n",
+ zconf_curname(), zconf_lineno(),
+ (yyvsp[-2].id)->stype);
+;}
+ break;
+
+ case 38:
+
+ {
+ menu_add_prompt(P_PROMPT, (yyvsp[-2].string), (yyvsp[-1].expr));
+ printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 39:
+
+ {
+ menu_add_expr(P_DEFAULT, (yyvsp[-2].expr), (yyvsp[-1].expr));
+ if ((yyvsp[-3].id)->stype != S_UNKNOWN)
+ menu_set_type((yyvsp[-3].id)->stype);
+ printd(DEBUG_PARSE, "%s:%d:default(%u)\n",
+ zconf_curname(), zconf_lineno(),
+ (yyvsp[-3].id)->stype);
+;}
+ break;
+
+ case 40:
+
+ {
+ menu_add_symbol(P_SELECT, sym_lookup((yyvsp[-2].string), 0), (yyvsp[-1].expr));
+ printd(DEBUG_PARSE, "%s:%d:select\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 41:
+
+ {
+ menu_add_expr(P_RANGE, expr_alloc_comp(E_RANGE,(yyvsp[-3].symbol), (yyvsp[-2].symbol)), (yyvsp[-1].expr));
+ printd(DEBUG_PARSE, "%s:%d:range\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 42:
+
+ {
+ struct symbol *sym = sym_lookup(NULL, 0);
+ sym->flags |= SYMBOL_CHOICE;
+ menu_add_entry(sym);
+ menu_add_expr(P_CHOICE, NULL, NULL);
+ printd(DEBUG_PARSE, "%s:%d:choice\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 43:
+
+ {
+ (yyval.menu) = menu_add_menu();
+;}
+ break;
+
+ case 44:
+
+ {
+ if (zconf_endtoken((yyvsp[0].id), T_CHOICE, T_ENDCHOICE)) {
+ menu_end_menu();
+ printd(DEBUG_PARSE, "%s:%d:endchoice\n", zconf_curname(), zconf_lineno());
+ }
+;}
+ break;
+
+ case 52:
+
+ {
+ menu_add_prompt(P_PROMPT, (yyvsp[-2].string), (yyvsp[-1].expr));
+ printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 53:
+
+ {
+ if ((yyvsp[-2].id)->stype == S_BOOLEAN || (yyvsp[-2].id)->stype == S_TRISTATE) {
+ menu_set_type((yyvsp[-2].id)->stype);
+ printd(DEBUG_PARSE, "%s:%d:type(%u)\n",
+ zconf_curname(), zconf_lineno(),
+ (yyvsp[-2].id)->stype);
+ } else
+ YYERROR;
+;}
+ break;
+
+ case 54:
+
+ {
+ current_entry->sym->flags |= SYMBOL_OPTIONAL;
+ printd(DEBUG_PARSE, "%s:%d:optional\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 55:
+
+ {
+ if ((yyvsp[-3].id)->stype == S_UNKNOWN) {
+ menu_add_symbol(P_DEFAULT, sym_lookup((yyvsp[-2].string), 0), (yyvsp[-1].expr));
+ printd(DEBUG_PARSE, "%s:%d:default\n",
+ zconf_curname(), zconf_lineno());
+ } else
+ YYERROR;
+;}
+ break;
+
+ case 58:
+
+ {
+ printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno());
+ menu_add_entry(NULL);
+ menu_add_dep((yyvsp[-1].expr));
+ (yyval.menu) = menu_add_menu();
+;}
+ break;
+
+ case 59:
+
+ {
+ if (zconf_endtoken((yyvsp[0].id), T_IF, T_ENDIF)) {
+ menu_end_menu();
+ printd(DEBUG_PARSE, "%s:%d:endif\n", zconf_curname(), zconf_lineno());
+ }
+;}
+ break;
+
+ case 65:
+
+ {
+ menu_add_entry(NULL);
+ menu_add_prompt(P_MENU, (yyvsp[-1].string), NULL);
+ printd(DEBUG_PARSE, "%s:%d:menu\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 66:
+
+ {
+ (yyval.menu) = menu_add_menu();
+;}
+ break;
+
+ case 67:
+
+ {
+ if (zconf_endtoken((yyvsp[0].id), T_MENU, T_ENDMENU)) {
+ menu_end_menu();
+ printd(DEBUG_PARSE, "%s:%d:endmenu\n", zconf_curname(), zconf_lineno());
+ }
+;}
+ break;
+
+ case 73:
+
+ {
+ printd(DEBUG_PARSE, "%s:%d:source %s\n", zconf_curname(), zconf_lineno(), (yyvsp[-1].string));
+ zconf_nextfile((yyvsp[-1].string));
+;}
+ break;
+
+ case 74:
+
+ {
+ menu_add_entry(NULL);
+ menu_add_prompt(P_COMMENT, (yyvsp[-1].string), NULL);
+ printd(DEBUG_PARSE, "%s:%d:comment\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 75:
+
+ {
+ menu_end_entry();
+;}
+ break;
+
+ case 76:
+
+ {
+ printd(DEBUG_PARSE, "%s:%d:help\n", zconf_curname(), zconf_lineno());
+ zconf_starthelp();
+;}
+ break;
+
+ case 77:
+
+ {
+ current_entry->sym->help = (yyvsp[0].string);
+;}
+ break;
+
+ case 82:
+
+ {
+ menu_add_dep((yyvsp[-1].expr));
+ printd(DEBUG_PARSE, "%s:%d:depends on\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 83:
+
+ {
+ menu_add_dep((yyvsp[-1].expr));
+ printd(DEBUG_PARSE, "%s:%d:depends\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 84:
+
+ {
+ menu_add_dep((yyvsp[-1].expr));
+ printd(DEBUG_PARSE, "%s:%d:requires\n", zconf_curname(), zconf_lineno());
+;}
+ break;
+
+ case 86:
+
+ {
+ menu_add_prompt(P_PROMPT, (yyvsp[-1].string), (yyvsp[0].expr));
+;}
+ break;
+
+ case 89:
+
+ { (yyval.id) = (yyvsp[-1].id); ;}
+ break;
+
+ case 90:
+
+ { (yyval.id) = (yyvsp[-1].id); ;}
+ break;
+
+ case 91:
+
+ { (yyval.id) = (yyvsp[-1].id); ;}
+ break;
+
+ case 94:
+
+ { (yyval.expr) = NULL; ;}
+ break;
+
+ case 95:
+
+ { (yyval.expr) = (yyvsp[0].expr); ;}
+ break;
+
+ case 96:
+
+ { (yyval.expr) = expr_alloc_symbol((yyvsp[0].symbol)); ;}
+ break;
+
+ case 97:
+
+ { (yyval.expr) = expr_alloc_comp(E_EQUAL, (yyvsp[-2].symbol), (yyvsp[0].symbol)); ;}
+ break;
+
+ case 98:
+
+ { (yyval.expr) = expr_alloc_comp(E_UNEQUAL, (yyvsp[-2].symbol), (yyvsp[0].symbol)); ;}
+ break;
+
+ case 99:
+
+ { (yyval.expr) = (yyvsp[-1].expr); ;}
+ break;
+
+ case 100:
+
+ { (yyval.expr) = expr_alloc_one(E_NOT, (yyvsp[0].expr)); ;}
+ break;
+
+ case 101:
+
+ { (yyval.expr) = expr_alloc_two(E_OR, (yyvsp[-2].expr), (yyvsp[0].expr)); ;}
+ break;
+
+ case 102:
+
+ { (yyval.expr) = expr_alloc_two(E_AND, (yyvsp[-2].expr), (yyvsp[0].expr)); ;}
+ break;
+
+ case 103:
+
+ { (yyval.symbol) = sym_lookup((yyvsp[0].string), 0); free((yyvsp[0].string)); ;}
+ break;
+
+ case 104:
+
+ { (yyval.symbol) = sym_lookup((yyvsp[0].string), 1); free((yyvsp[0].string)); ;}
+ break;
+
+
+ }
+
+/* Line 1037 of yacc.c. */
+
+\f
+ yyvsp -= yylen;
+ yyssp -= yylen;
+
+
+ YY_STACK_PRINT (yyss, yyssp);
+
+ *++yyvsp = yyval;
+
+
+ /* Now `shift' the result of the reduction. Determine what state
+ that goes to, based on the state we popped back to and the rule
+ number reduced by. */
+
+ yyn = yyr1[yyn];
+
+ yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
+ if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
+ yystate = yytable[yystate];
+ else
+ yystate = yydefgoto[yyn - YYNTOKENS];
+
+ goto yynewstate;
+
+
+/*------------------------------------.
+| yyerrlab -- here on detecting error |
+`------------------------------------*/
+yyerrlab:
+ /* If not already recovering from an error, report this error. */
+ if (!yyerrstatus)
+ {
+ ++yynerrs;
+#if YYERROR_VERBOSE
+ yyn = yypact[yystate];
+
+ if (YYPACT_NINF < yyn && yyn < YYLAST)
+ {
+ YYSIZE_T yysize = 0;
+ int yytype = YYTRANSLATE (yychar);
+ const char* yyprefix;
+ char *yymsg;
+ int yyx;
+
+ /* Start YYX at -YYN if negative to avoid negative indexes in
+ YYCHECK. */
+ int yyxbegin = yyn < 0 ? -yyn : 0;
+
+ /* Stay within bounds of both yycheck and yytname. */
+ int yychecklim = YYLAST - yyn;
+ int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
+ int yycount = 0;
+
+ yyprefix = ", expecting ";
+ for (yyx = yyxbegin; yyx < yyxend; ++yyx)
+ if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
+ {
+ yysize += yystrlen (yyprefix) + yystrlen (yytname [yyx]);
+ yycount += 1;
+ if (yycount == 5)
+ {
+ yysize = 0;
+ break;
+ }
+ }
+ yysize += (sizeof ("syntax error, unexpected ")
+ + yystrlen (yytname[yytype]));
+ yymsg = (char *) YYSTACK_ALLOC (yysize);
+ if (yymsg != 0)
+ {
+ char *yyp = yystpcpy (yymsg, "syntax error, unexpected ");
+ yyp = yystpcpy (yyp, yytname[yytype]);
+
+ if (yycount < 5)
+ {
+ yyprefix = ", expecting ";
+ for (yyx = yyxbegin; yyx < yyxend; ++yyx)
+ if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
+ {
+ yyp = yystpcpy (yyp, yyprefix);
+ yyp = yystpcpy (yyp, yytname[yyx]);
+ yyprefix = " or ";
+ }
+ }
+ yyerror (yymsg);
+ YYSTACK_FREE (yymsg);
+ }
+ else
+ yyerror ("syntax error; also virtual memory exhausted");
+ }
+ else
+#endif /* YYERROR_VERBOSE */
+ yyerror ("syntax error");
+ }
+
+
+
+ if (yyerrstatus == 3)
+ {
+ /* If just tried and failed to reuse look-ahead token after an
+ error, discard it. */
+
+ if (yychar <= YYEOF)
+ {
+ /* If at end of input, pop the error token,
+ then the rest of the stack, then return failure. */
+ if (yychar == YYEOF)
+ for (;;)
+ {
+
+ YYPOPSTACK;
+ if (yyssp == yyss)
+ YYABORT;
+ yydestruct ("Error: popping",
+ yystos[*yyssp], yyvsp);
+ }
+ }
+ else
+ {
+ yydestruct ("Error: discarding", yytoken, &yylval);
+ yychar = YYEMPTY;
+ }
+ }
+
+ /* Else will try to reuse look-ahead token after shifting the error
+ token. */
+ goto yyerrlab1;
+
+
+/*---------------------------------------------------.
+| yyerrorlab -- error raised explicitly by YYERROR. |
+`---------------------------------------------------*/
+yyerrorlab:
+
+#ifdef __GNUC__
+ /* Pacify GCC when the user code never invokes YYERROR and the label
+ yyerrorlab therefore never appears in user code. */
+ if (0)
+ goto yyerrorlab;
+#endif
+
+yyvsp -= yylen;
+ yyssp -= yylen;
+ yystate = *yyssp;
+ goto yyerrlab1;
+
+
+/*-------------------------------------------------------------.
+| yyerrlab1 -- common code for both syntax error and YYERROR. |
+`-------------------------------------------------------------*/
+yyerrlab1:
+ yyerrstatus = 3; /* Each real token shifted decrements this. */
+
+ for (;;)
+ {
+ yyn = yypact[yystate];
+ if (yyn != YYPACT_NINF)
+ {
+ yyn += YYTERROR;
+ if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
+ {
+ yyn = yytable[yyn];
+ if (0 < yyn)
+ break;
+ }
+ }
+
+ /* Pop the current state because it cannot handle the error token. */
+ if (yyssp == yyss)
+ YYABORT;
+
+
+ yydestruct ("Error: popping", yystos[yystate], yyvsp);
+ YYPOPSTACK;
+ yystate = *yyssp;
+ YY_STACK_PRINT (yyss, yyssp);
+ }
+
+ if (yyn == YYFINAL)
+ YYACCEPT;
+
+ *++yyvsp = yylval;
+
+
+ /* Shift the error token. */
+ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
+
+ yystate = yyn;
+ goto yynewstate;
+
+
+/*-------------------------------------.
+| yyacceptlab -- YYACCEPT comes here. |
+`-------------------------------------*/
+yyacceptlab:
+ yyresult = 0;
+ goto yyreturn;
+
+/*-----------------------------------.
+| yyabortlab -- YYABORT comes here. |
+`-----------------------------------*/
+yyabortlab:
+ yydestruct ("Error: discarding lookahead",
+ yytoken, &yylval);
+ yychar = YYEMPTY;
+ yyresult = 1;
+ goto yyreturn;
+
+#ifndef yyoverflow
+/*----------------------------------------------.
+| yyoverflowlab -- parser overflow comes here. |
+`----------------------------------------------*/
+yyoverflowlab:
+ yyerror ("parser stack overflow");
+ yyresult = 2;
+ /* Fall through. */
+#endif
+
+yyreturn:
+#ifndef yyoverflow
+ if (yyss != yyssa)
+ YYSTACK_FREE (yyss);
+#endif
+ return yyresult;
+}
+
+
+
+
+
+void conf_parse(const char *name)
+{
+ struct symbol *sym;
+ int i;
+
+ zconf_initscan(name);
+
+ sym_init();
+ menu_init();
+ modules_sym = sym_lookup("MODULES", 0);
+ rootmenu.prompt = menu_add_prompt(P_MENU, "LWK Configuration", NULL);
+
+#if YYDEBUG
+ if (getenv("ZCONF_DEBUG"))
+ zconfdebug = 1;
+#endif
+ zconfparse();
+ if (zconfnerrs)
+ exit(1);
+ menu_finalize(&rootmenu);
+ for_all_symbols(i, sym) {
+ sym_check_deps(sym);
+ }
+
+ sym_change_count = 1;
+}
+
+const char *zconf_tokenname(int token)
+{
+ switch (token) {
+ case T_MENU: return "menu";
+ case T_ENDMENU: return "endmenu";
+ case T_CHOICE: return "choice";
+ case T_ENDCHOICE: return "endchoice";
+ case T_IF: return "if";
+ case T_ENDIF: return "endif";
+ case T_DEPENDS: return "depends";
+ }
+ return "<token>";
+}
+
+static bool zconf_endtoken(struct kconf_id *id, int starttoken, int endtoken)
+{
+ if (id->token != endtoken) {
+ zconf_error("unexpected '%s' within %s block",
+ kconf_id_strings + id->name, zconf_tokenname(starttoken));
+ zconfnerrs++;
+ return false;
+ }
+ if (current_menu->file != current_file) {
+ zconf_error("'%s' in different file than '%s'",
+ kconf_id_strings + id->name, zconf_tokenname(starttoken));
+ fprintf(stderr, "%s:%d: location of the '%s'\n",
+ current_menu->file->name, current_menu->lineno,
+ zconf_tokenname(starttoken));
+ zconfnerrs++;
+ return false;
+ }
+ return true;
+}
+
+static void zconfprint(const char *err, ...)
+{
+ va_list ap;
+
+ fprintf(stderr, "%s:%d: ", zconf_curname(), zconf_lineno());
+ va_start(ap, err);
+ vfprintf(stderr, err, ap);
+ va_end(ap);
+ fprintf(stderr, "\n");
+}
+
+static void zconf_error(const char *err, ...)
+{
+ va_list ap;
+
+ zconfnerrs++;
+ fprintf(stderr, "%s:%d: ", zconf_curname(), zconf_lineno());
+ va_start(ap, err);
+ vfprintf(stderr, err, ap);
+ va_end(ap);
+ fprintf(stderr, "\n");
+}
+
+static void zconferror(const char *err)
+{
+#if YYDEBUG
+ fprintf(stderr, "%s:%d: %s\n", zconf_curname(), zconf_lineno() + 1, err);
+#endif
+}
+
+void print_quoted_string(FILE *out, const char *str)
+{
+ const char *p;
+ int len;
+
+ putc('"', out);
+ while ((p = strchr(str, '"'))) {
+ len = p - str;
+ if (len)
+ fprintf(out, "%.*s", len, str);
+ fputs("\\\"", out);
+ str = p + 1;
+ }
+ fputs(str, out);
+ putc('"', out);
+}
+
+void print_symbol(FILE *out, struct menu *menu)
+{
+ struct symbol *sym = menu->sym;
+ struct property *prop;
+
+ if (sym_is_choice(sym))
+ fprintf(out, "choice\n");
+ else
+ fprintf(out, "config %s\n", sym->name);
+ switch (sym->type) {
+ case S_BOOLEAN:
+ fputs(" boolean\n", out);
+ break;
+ case S_TRISTATE:
+ fputs(" tristate\n", out);
+ break;
+ case S_STRING:
+ fputs(" string\n", out);
+ break;
+ case S_INT:
+ fputs(" integer\n", out);
+ break;
+ case S_HEX:
+ fputs(" hex\n", out);
+ break;
+ default:
+ fputs(" ???\n", out);
+ break;
+ }
+ for (prop = sym->prop; prop; prop = prop->next) {
+ if (prop->menu != menu)
+ continue;
+ switch (prop->type) {
+ case P_PROMPT:
+ fputs(" prompt ", out);
+ print_quoted_string(out, prop->text);
+ if (!expr_is_yes(prop->visible.expr)) {
+ fputs(" if ", out);
+ expr_fprint(prop->visible.expr, out);
+ }
+ fputc('\n', out);
+ break;
+ case P_DEFAULT:
+ fputs( " default ", out);
+ expr_fprint(prop->expr, out);
+ if (!expr_is_yes(prop->visible.expr)) {
+ fputs(" if ", out);
+ expr_fprint(prop->visible.expr, out);
+ }
+ fputc('\n', out);
+ break;
+ case P_CHOICE:
+ fputs(" #choice value\n", out);
+ break;
+ default:
+ fprintf(out, " unknown prop %d!\n", prop->type);
+ break;
+ }
+ }
+ if (sym->help) {
+ int len = strlen(sym->help);
+ while (sym->help[--len] == '\n')
+ sym->help[len] = 0;
+ fprintf(out, " help\n%s\n", sym->help);
+ }
+ fputc('\n', out);
+}
+
+void zconfdump(FILE *out)
+{
+ struct property *prop;
+ struct symbol *sym;
+ struct menu *menu;
+
+ menu = rootmenu.list;
+ while (menu) {
+ if ((sym = menu->sym))
+ print_symbol(out, menu);
+ else if ((prop = menu->prompt)) {
+ switch (prop->type) {
+ case P_COMMENT:
+ fputs("\ncomment ", out);
+ print_quoted_string(out, prop->text);
+ fputs("\n", out);
+ break;
+ case P_MENU:
+ fputs("\nmenu ", out);
+ print_quoted_string(out, prop->text);
+ fputs("\n", out);
+ break;
+ default:
+ ;
+ }
+ if (!expr_is_yes(prop->visible.expr)) {
+ fputs(" depends ", out);
+ expr_fprint(prop->visible.expr, out);
+ fputc('\n', out);
+ }
+ fputs("\n", out);
+ }
+
+ if (menu->list)
+ menu = menu->list;
+ else if (menu->next)
+ menu = menu->next;
+ else while ((menu = menu->parent)) {
+ if (menu->prompt && menu->prompt->type == P_MENU)
+ fputs("\nendmenu\n", out);
+ if (menu->next) {
+ menu = menu->next;
+ break;
+ }
+ }
+ }
+}
+
+#include "lex.zconf.c"
+#include "util.c"
+#include "confdata.c"
+#include "expr.c"
+#include "symbol.c"
+#include "menu.c"
+
+
--- /dev/null
+%{
+/*
+ * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#include <ctype.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+
+#define LKC_DIRECT_LINK
+#include "lkc.h"
+
+#include "zconf.hash.c"
+
+#define printd(mask, fmt...) if (cdebug & (mask)) printf(fmt)
+
+#define PRINTD 0x0001
+#define DEBUG_PARSE 0x0002
+
+int cdebug = PRINTD;
+
+extern int zconflex(void);
+static void zconfprint(const char *err, ...);
+static void zconf_error(const char *err, ...);
+static void zconferror(const char *err);
+static bool zconf_endtoken(struct kconf_id *id, int starttoken, int endtoken);
+
+struct symbol *symbol_hash[257];
+
+static struct menu *current_menu, *current_entry;
+
+#define YYDEBUG 0
+#if YYDEBUG
+#define YYERROR_VERBOSE
+#endif
+%}
+%expect 26
+
+%union
+{
+ char *string;
+ struct file *file;
+ struct symbol *symbol;
+ struct expr *expr;
+ struct menu *menu;
+ struct kconf_id *id;
+}
+
+%token <id>T_MAINMENU
+%token <id>T_MENU
+%token <id>T_ENDMENU
+%token <id>T_SOURCE
+%token <id>T_CHOICE
+%token <id>T_ENDCHOICE
+%token <id>T_COMMENT
+%token <id>T_CONFIG
+%token <id>T_MENUCONFIG
+%token <id>T_HELP
+%token <string> T_HELPTEXT
+%token <id>T_IF
+%token <id>T_ENDIF
+%token <id>T_DEPENDS
+%token <id>T_REQUIRES
+%token <id>T_OPTIONAL
+%token <id>T_PROMPT
+%token <id>T_TYPE
+%token <id>T_DEFAULT
+%token <id>T_SELECT
+%token <id>T_RANGE
+%token <id>T_ON
+%token <string> T_WORD
+%token <string> T_WORD_QUOTE
+%token T_UNEQUAL
+%token T_CLOSE_PAREN
+%token T_OPEN_PAREN
+%token T_EOL
+
+%left T_OR
+%left T_AND
+%left T_EQUAL T_UNEQUAL
+%nonassoc T_NOT
+
+%type <string> prompt
+%type <symbol> symbol
+%type <expr> expr
+%type <expr> if_expr
+%type <id> end
+%type <id> option_name
+%type <menu> if_entry menu_entry choice_entry
+
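+/*
+ * When one of these partially parsed entries is discarded (for example
+ * during error recovery), report the missing end statement and, if the
+ * entry is still the current menu, close it so the menu stack stays
+ * consistent.
+ */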
+%destructor {
+ fprintf(stderr, "%s:%d: missing end statement for this entry\n",
+ $$->file->name, $$->lineno);
+ if (current_menu == $$)
+ menu_end_menu();
+} if_entry menu_entry choice_entry
+
+%%
+input: stmt_list;
+
+stmt_list:
+ /* empty */
+ | stmt_list common_stmt
+ | stmt_list choice_stmt
+ | stmt_list menu_stmt
+ | stmt_list T_MAINMENU prompt nl
+ | stmt_list end { zconf_error("unexpected end statement"); }
+ | stmt_list T_WORD error T_EOL { zconf_error("unknown statement \"%s\"", $2); }
+ | stmt_list option_name error T_EOL
+{
+ zconf_error("unexpected option \"%s\"", kconf_id_strings + $2->name);
+}
+ | stmt_list error T_EOL { zconf_error("invalid statement"); }
+;
+
+option_name:
+ T_DEPENDS | T_PROMPT | T_TYPE | T_SELECT | T_OPTIONAL | T_RANGE | T_DEFAULT
+;
+
+common_stmt:
+ T_EOL
+ | if_stmt
+ | comment_stmt
+ | config_stmt
+ | menuconfig_stmt
+ | source_stmt
+;
+
+option_error:
+ T_WORD error T_EOL { zconf_error("unknown option \"%s\"", $1); }
+ | error T_EOL { zconf_error("invalid option"); }
+;
+
+
+/* config/menuconfig entry */
+
+config_entry_start: T_CONFIG T_WORD T_EOL
+{
+ struct symbol *sym = sym_lookup($2, 0);
+ sym->flags |= SYMBOL_OPTIONAL;
+ menu_add_entry(sym);
+ printd(DEBUG_PARSE, "%s:%d:config %s\n", zconf_curname(), zconf_lineno(), $2);
+};
+
+config_stmt: config_entry_start config_option_list
+{
+ menu_end_entry();
+ printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno());
+};
+
+menuconfig_entry_start: T_MENUCONFIG T_WORD T_EOL
+{
+ struct symbol *sym = sym_lookup($2, 0);
+ sym->flags |= SYMBOL_OPTIONAL;
+ menu_add_entry(sym);
+ printd(DEBUG_PARSE, "%s:%d:menuconfig %s\n", zconf_curname(), zconf_lineno(), $2);
+};
+
+menuconfig_stmt: menuconfig_entry_start config_option_list
+{
+ if (current_entry->prompt)
+ current_entry->prompt->type = P_MENU;
+ else
+ zconfprint("warning: menuconfig statement without prompt");
+ menu_end_entry();
+ printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno());
+};
+
+config_option_list:
+ /* empty */
+ | config_option_list config_option
+ | config_option_list depends
+ | config_option_list help
+ | config_option_list option_error
+ | config_option_list T_EOL
+;
+
+config_option: T_TYPE prompt_stmt_opt T_EOL
+{
+ menu_set_type($1->stype);
+ printd(DEBUG_PARSE, "%s:%d:type(%u)\n",
+ zconf_curname(), zconf_lineno(),
+ $1->stype);
+};
+
+config_option: T_PROMPT prompt if_expr T_EOL
+{
+ menu_add_prompt(P_PROMPT, $2, $3);
+ printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
+};
+
+config_option: T_DEFAULT expr if_expr T_EOL
+{
+ menu_add_expr(P_DEFAULT, $2, $3);
+ if ($1->stype != S_UNKNOWN)
+ menu_set_type($1->stype);
+ printd(DEBUG_PARSE, "%s:%d:default(%u)\n",
+ zconf_curname(), zconf_lineno(),
+ $1->stype);
+};
+
+config_option: T_SELECT T_WORD if_expr T_EOL
+{
+ menu_add_symbol(P_SELECT, sym_lookup($2, 0), $3);
+ printd(DEBUG_PARSE, "%s:%d:select\n", zconf_curname(), zconf_lineno());
+};
+
+config_option: T_RANGE symbol symbol if_expr T_EOL
+{
+ menu_add_expr(P_RANGE, expr_alloc_comp(E_RANGE,$2, $3), $4);
+ printd(DEBUG_PARSE, "%s:%d:range\n", zconf_curname(), zconf_lineno());
+};
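+
+/*
+ * Purely illustrative, hypothetical Kconfig fragment (not taken from this
+ * tree) of the kind accepted by the config_entry_start/config_option rules
+ * above:
+ *
+ *   config FOO
+ *           bool "Enable foo support"
+ *           default y
+ *           depends on BAR
+ *           select BAZ
+ *           help
+ *             Help text describing FOO.
+ */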
+
+/* choice entry */
+
+choice: T_CHOICE T_EOL
+{
+ struct symbol *sym = sym_lookup(NULL, 0);
+ sym->flags |= SYMBOL_CHOICE;
+ menu_add_entry(sym);
+ menu_add_expr(P_CHOICE, NULL, NULL);
+ printd(DEBUG_PARSE, "%s:%d:choice\n", zconf_curname(), zconf_lineno());
+};
+
+choice_entry: choice choice_option_list
+{
+ $$ = menu_add_menu();
+};
+
+choice_end: end
+{
+ if (zconf_endtoken($1, T_CHOICE, T_ENDCHOICE)) {
+ menu_end_menu();
+ printd(DEBUG_PARSE, "%s:%d:endchoice\n", zconf_curname(), zconf_lineno());
+ }
+};
+
+choice_stmt: choice_entry choice_block choice_end
+;
+
+choice_option_list:
+ /* empty */
+ | choice_option_list choice_option
+ | choice_option_list depends
+ | choice_option_list help
+ | choice_option_list T_EOL
+ | choice_option_list option_error
+;
+
+choice_option: T_PROMPT prompt if_expr T_EOL
+{
+ menu_add_prompt(P_PROMPT, $2, $3);
+ printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
+};
+
+choice_option: T_TYPE prompt_stmt_opt T_EOL
+{
+ if ($1->stype == S_BOOLEAN || $1->stype == S_TRISTATE) {
+ menu_set_type($1->stype);
+ printd(DEBUG_PARSE, "%s:%d:type(%u)\n",
+ zconf_curname(), zconf_lineno(),
+ $1->stype);
+ } else
+ YYERROR;
+};
+
+choice_option: T_OPTIONAL T_EOL
+{
+ current_entry->sym->flags |= SYMBOL_OPTIONAL;
+ printd(DEBUG_PARSE, "%s:%d:optional\n", zconf_curname(), zconf_lineno());
+};
+
+choice_option: T_DEFAULT T_WORD if_expr T_EOL
+{
+ if ($1->stype == S_UNKNOWN) {
+ menu_add_symbol(P_DEFAULT, sym_lookup($2, 0), $3);
+ printd(DEBUG_PARSE, "%s:%d:default\n",
+ zconf_curname(), zconf_lineno());
+ } else
+ YYERROR;
+};
+
+choice_block:
+ /* empty */
+ | choice_block common_stmt
+;
+
+/* if entry */
+
+if_entry: T_IF expr nl
+{
+ printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno());
+ menu_add_entry(NULL);
+ menu_add_dep($2);
+ $$ = menu_add_menu();
+};
+
+if_end: end
+{
+ if (zconf_endtoken($1, T_IF, T_ENDIF)) {
+ menu_end_menu();
+ printd(DEBUG_PARSE, "%s:%d:endif\n", zconf_curname(), zconf_lineno());
+ }
+};
+
+if_stmt: if_entry if_block if_end
+;
+
+if_block:
+ /* empty */
+ | if_block common_stmt
+ | if_block menu_stmt
+ | if_block choice_stmt
+;
+
+/* menu entry */
+
+menu: T_MENU prompt T_EOL
+{
+ menu_add_entry(NULL);
+ menu_add_prompt(P_MENU, $2, NULL);
+ printd(DEBUG_PARSE, "%s:%d:menu\n", zconf_curname(), zconf_lineno());
+};
+
+menu_entry: menu depends_list
+{
+ $$ = menu_add_menu();
+};
+
+menu_end: end
+{
+ if (zconf_endtoken($1, T_MENU, T_ENDMENU)) {
+ menu_end_menu();
+ printd(DEBUG_PARSE, "%s:%d:endmenu\n", zconf_curname(), zconf_lineno());
+ }
+};
+
+menu_stmt: menu_entry menu_block menu_end
+;
+
+menu_block:
+ /* empty */
+ | menu_block common_stmt
+ | menu_block menu_stmt
+ | menu_block choice_stmt
+;
+
+source_stmt: T_SOURCE prompt T_EOL
+{
+ printd(DEBUG_PARSE, "%s:%d:source %s\n", zconf_curname(), zconf_lineno(), $2);
+ zconf_nextfile($2);
+};
+
+/* comment entry */
+
+comment: T_COMMENT prompt T_EOL
+{
+ menu_add_entry(NULL);
+ menu_add_prompt(P_COMMENT, $2, NULL);
+ printd(DEBUG_PARSE, "%s:%d:comment\n", zconf_curname(), zconf_lineno());
+};
+
+comment_stmt: comment depends_list
+{
+ menu_end_entry();
+};
+
+/* help option */
+
+help_start: T_HELP T_EOL
+{
+ printd(DEBUG_PARSE, "%s:%d:help\n", zconf_curname(), zconf_lineno());
+ zconf_starthelp();
+};
+
+help: help_start T_HELPTEXT
+{
+ current_entry->sym->help = $2;
+};
+
+/* depends option */
+
+depends_list:
+ /* empty */
+ | depends_list depends
+ | depends_list T_EOL
+ | depends_list option_error
+;
+
+depends: T_DEPENDS T_ON expr T_EOL
+{
+ menu_add_dep($3);
+ printd(DEBUG_PARSE, "%s:%d:depends on\n", zconf_curname(), zconf_lineno());
+}
+ | T_DEPENDS expr T_EOL
+{
+ menu_add_dep($2);
+ printd(DEBUG_PARSE, "%s:%d:depends\n", zconf_curname(), zconf_lineno());
+}
+ | T_REQUIRES expr T_EOL
+{
+ menu_add_dep($2);
+ printd(DEBUG_PARSE, "%s:%d:requires\n", zconf_curname(), zconf_lineno());
+};
+
+/* prompt statement */
+
+prompt_stmt_opt:
+ /* empty */
+ | prompt if_expr
+{
+ menu_add_prompt(P_PROMPT, $1, $2);
+};
+
+prompt: T_WORD
+ | T_WORD_QUOTE
+;
+
+end: T_ENDMENU T_EOL { $$ = $1; }
+ | T_ENDCHOICE T_EOL { $$ = $1; }
+ | T_ENDIF T_EOL { $$ = $1; }
+;
+
+nl:
+ T_EOL
+ | nl T_EOL
+;
+
+if_expr: /* empty */ { $$ = NULL; }
+ | T_IF expr { $$ = $2; }
+;
+
+expr: symbol { $$ = expr_alloc_symbol($1); }
+ | symbol T_EQUAL symbol { $$ = expr_alloc_comp(E_EQUAL, $1, $3); }
+ | symbol T_UNEQUAL symbol { $$ = expr_alloc_comp(E_UNEQUAL, $1, $3); }
+ | T_OPEN_PAREN expr T_CLOSE_PAREN { $$ = $2; }
+ | T_NOT expr { $$ = expr_alloc_one(E_NOT, $2); }
+ | expr T_OR expr { $$ = expr_alloc_two(E_OR, $1, $3); }
+ | expr T_AND expr { $$ = expr_alloc_two(E_AND, $1, $3); }
+;
+
+symbol: T_WORD { $$ = sym_lookup($1, 0); free($1); }
+ | T_WORD_QUOTE { $$ = sym_lookup($1, 1); free($1); }
+;
+
+%%
+
+void conf_parse(const char *name)
+{
+ struct symbol *sym;
+ int i;
+
+ zconf_initscan(name);
+
+ sym_init();
+ menu_init();
+ modules_sym = sym_lookup("MODULES", 0);
+ rootmenu.prompt = menu_add_prompt(P_MENU, "LWK Configuration", NULL);
+
+#if YYDEBUG
+ if (getenv("ZCONF_DEBUG"))
+ zconfdebug = 1;
+#endif
+ zconfparse();
+ if (zconfnerrs)
+ exit(1);
+ menu_finalize(&rootmenu);
+ for_all_symbols(i, sym) {
+ sym_check_deps(sym);
+ }
+
+ sym_change_count = 1;
+}
+
+const char *zconf_tokenname(int token)
+{
+ switch (token) {
+ case T_MENU: return "menu";
+ case T_ENDMENU: return "endmenu";
+ case T_CHOICE: return "choice";
+ case T_ENDCHOICE: return "endchoice";
+ case T_IF: return "if";
+ case T_ENDIF: return "endif";
+ case T_DEPENDS: return "depends";
+ }
+ return "<token>";
+}
+
+static bool zconf_endtoken(struct kconf_id *id, int starttoken, int endtoken)
+{
+ if (id->token != endtoken) {
+ zconf_error("unexpected '%s' within %s block",
+ kconf_id_strings + id->name, zconf_tokenname(starttoken));
+ zconfnerrs++;
+ return false;
+ }
+ if (current_menu->file != current_file) {
+ zconf_error("'%s' in different file than '%s'",
+ kconf_id_strings + id->name, zconf_tokenname(starttoken));
+ fprintf(stderr, "%s:%d: location of the '%s'\n",
+ current_menu->file->name, current_menu->lineno,
+ zconf_tokenname(starttoken));
+ zconfnerrs++;
+ return false;
+ }
+ return true;
+}
+
+static void zconfprint(const char *err, ...)
+{
+ va_list ap;
+
+ fprintf(stderr, "%s:%d: ", zconf_curname(), zconf_lineno());
+ va_start(ap, err);
+ vfprintf(stderr, err, ap);
+ va_end(ap);
+ fprintf(stderr, "\n");
+}
+
+static void zconf_error(const char *err, ...)
+{
+ va_list ap;
+
+ zconfnerrs++;
+ fprintf(stderr, "%s:%d: ", zconf_curname(), zconf_lineno());
+ va_start(ap, err);
+ vfprintf(stderr, err, ap);
+ va_end(ap);
+ fprintf(stderr, "\n");
+}
+
+static void zconferror(const char *err)
+{
+#if YYDEBUG
+ fprintf(stderr, "%s:%d: %s\n", zconf_curname(), zconf_lineno() + 1, err);
+#endif
+}
+
+void print_quoted_string(FILE *out, const char *str)
+{
+ const char *p;
+ int len;
+
+ putc('"', out);
+ while ((p = strchr(str, '"'))) {
+ len = p - str;
+ if (len)
+ fprintf(out, "%.*s", len, str);
+ fputs("\\\"", out);
+ str = p + 1;
+ }
+ fputs(str, out);
+ putc('"', out);
+}
+
+void print_symbol(FILE *out, struct menu *menu)
+{
+ struct symbol *sym = menu->sym;
+ struct property *prop;
+
+ if (sym_is_choice(sym))
+ fprintf(out, "choice\n");
+ else
+ fprintf(out, "config %s\n", sym->name);
+ switch (sym->type) {
+ case S_BOOLEAN:
+ fputs(" boolean\n", out);
+ break;
+ case S_TRISTATE:
+ fputs(" tristate\n", out);
+ break;
+ case S_STRING:
+ fputs(" string\n", out);
+ break;
+ case S_INT:
+ fputs(" integer\n", out);
+ break;
+ case S_HEX:
+ fputs(" hex\n", out);
+ break;
+ default:
+ fputs(" ???\n", out);
+ break;
+ }
+ for (prop = sym->prop; prop; prop = prop->next) {
+ if (prop->menu != menu)
+ continue;
+ switch (prop->type) {
+ case P_PROMPT:
+ fputs(" prompt ", out);
+ print_quoted_string(out, prop->text);
+ if (!expr_is_yes(prop->visible.expr)) {
+ fputs(" if ", out);
+ expr_fprint(prop->visible.expr, out);
+ }
+ fputc('\n', out);
+ break;
+ case P_DEFAULT:
+ fputs( " default ", out);
+ expr_fprint(prop->expr, out);
+ if (!expr_is_yes(prop->visible.expr)) {
+ fputs(" if ", out);
+ expr_fprint(prop->visible.expr, out);
+ }
+ fputc('\n', out);
+ break;
+ case P_CHOICE:
+ fputs(" #choice value\n", out);
+ break;
+ default:
+ fprintf(out, " unknown prop %d!\n", prop->type);
+ break;
+ }
+ }
+ if (sym->help) {
+ int len = strlen(sym->help);
+ while (sym->help[--len] == '\n')
+ sym->help[len] = 0;
+ fprintf(out, " help\n%s\n", sym->help);
+ }
+ fputc('\n', out);
+}
+
+void zconfdump(FILE *out)
+{
+ struct property *prop;
+ struct symbol *sym;
+ struct menu *menu;
+
+ menu = rootmenu.list;
+ while (menu) {
+ if ((sym = menu->sym))
+ print_symbol(out, menu);
+ else if ((prop = menu->prompt)) {
+ switch (prop->type) {
+ case P_COMMENT:
+ fputs("\ncomment ", out);
+ print_quoted_string(out, prop->text);
+ fputs("\n", out);
+ break;
+ case P_MENU:
+ fputs("\nmenu ", out);
+ print_quoted_string(out, prop->text);
+ fputs("\n", out);
+ break;
+ default:
+ ;
+ }
+ if (!expr_is_yes(prop->visible.expr)) {
+ fputs(" depends ", out);
+ expr_fprint(prop->visible.expr, out);
+ fputc('\n', out);
+ }
+ fputs("\n", out);
+ }
+
+ if (menu->list)
+ menu = menu->list;
+ else if (menu->next)
+ menu = menu->next;
+ else while ((menu = menu->parent)) {
+ if (menu->prompt && menu->prompt->type == P_MENU)
+ fputs("\nendmenu\n", out);
+ if (menu->next) {
+ menu = menu->next;
+ break;
+ }
+ }
+ }
+}
+
+#include "lex.zconf.c"
+#include "util.c"
+#include "confdata.c"
+#include "expr.c"
+#include "symbol.c"
+#include "menu.c"
--- /dev/null
+TARGET=$1
+ARCH=$2
+CC=$3
+
+# If compile.h exists already and we don't own autoconf.h
+# (i.e. we're not the same user who did make *config), don't
+# modify compile.h
+# So "sudo make install" won't change the "compiled by <user>"
+# to "compiled by root"
+
+if [ -r $TARGET -a ! -O include/lwk/autoconf.h ]; then
+ echo " SKIPPED $TARGET"
+ exit 0
+fi
+
+# Do not expand names
+set -f
+
+if [ -r .version ]; then
+ VERSION=`cat .version`
+else
+ VERSION=0
+ echo 0 > .version
+fi
+
+
+UTS_VERSION="#$VERSION"
+CONFIG_FLAGS=""
+#if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
+#if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
+UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS `LC_ALL=C LANG=C date`"
+
+# Truncate to maximum length
+
+UTS_LEN=64
+UTS_TRUNCATE="sed -e s/\(.\{1,$UTS_LEN\}\).*/\1/"
+
+# Generate a temporary compile.h
+
+( echo /\* This file is auto generated, version $VERSION \*/
+ if [ -n "$CONFIG_FLAGS" ] ; then echo "/* $CONFIG_FLAGS */"; fi
+
+ echo \#define UTS_MACHINE \"$ARCH\"
+
+ echo \#define UTS_VERSION \"`echo $UTS_VERSION | $UTS_TRUNCATE`\"
+
+ echo \#define LWK_COMPILE_TIME \"`LC_ALL=C LANG=C date +%T`\"
+ echo \#define LWK_COMPILE_BY \"`whoami`\"
+ echo \#define LWK_COMPILE_HOST \"`hostname | $UTS_TRUNCATE`\"
+
+ if [ -x /bin/dnsdomainname ]; then
+ echo \#define LWK_COMPILE_DOMAIN \"`dnsdomainname | $UTS_TRUNCATE`\"
+ elif [ -x /bin/domainname ]; then
+ echo \#define LWK_COMPILE_DOMAIN \"`domainname | $UTS_TRUNCATE`\"
+ else
+ echo \#define LWK_COMPILE_DOMAIN
+ fi
+
+ echo \#define LWK_COMPILER \"`$CC -v 2>&1 | tail -n 1`\"
+) > .tmpcompile
+
+# Only replace the real compile.h if the new one is different,
+# in order to preserve the timestamp and avoid unnecessary
+# recompilations.
+# We don't consider the file changed if only the date/time changed.
+# A kernel config change will increase the generation number, thus
+# causing compile.h to be updated (including date/time) due to the
+# changed comment in the first line.
+
+if [ -r $TARGET ] && \
+ grep -v 'UTS_VERSION\|LWK_COMPILE_TIME' $TARGET > .tmpver.1 && \
+ grep -v 'UTS_VERSION\|LWK_COMPILE_TIME' .tmpcompile > .tmpver.2 && \
+ cmp -s .tmpver.1 .tmpver.2; then
+ rm -f .tmpcompile
+else
+ echo " UPD $TARGET"
+ mv -f .tmpcompile $TARGET
+fi
+rm -f .tmpver.1 .tmpver.2
--- /dev/null
+#!/bin/sh -x
+# Based on the vmlinux file create the System.map file
+# System.map is used by module-init tools and some debugging
+# tools to retrieve the actual addresses of symbols in the kernel.
+#
+# Usage
+# mksysmap vmlinux System.map
+
+
+#####
+# Generate System.map (actual filename passed as second argument)
+
+# $NM produces the following output:
+# f0081e80 T alloc_vfsmnt
+
+# The second field specifies the type of the symbol:
+# A = Absolute
+# B = Uninitialised data (.bss)
+# C = Common symbol
+# D = Initialised data
+# G = Initialised data for small objects
+# I = Indirect reference to another symbol
+# N = Debugging symbol
+# R = Read only
+# S = Uninitialised data for small objects
+# T = Text code symbol
+# U = Undefined symbol
+# V = Weak symbol
+# W = Weak symbol
+# Corresponding small letters are local symbols
+
+# For System.map filter away:
+# a - local absolute symbols
+# U - undefined global symbols
+# w - local weak symbols
+
+# readprofile starts reading symbols when _stext is found, and
+# continues until it finds a symbol which is not one of 'T', 't',
+# 'W' or 'w'. __crc_ symbols are of type 'A' and placed in the middle,
+# so we just ignore them to let readprofile continue to work.
+# (At least sparc64 has __crc_ symbols in the middle.)
+
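+# Purely illustrative example (hypothetical symbols, not from a real build):
+# given nm output such as
+#   ffffffff80100000 T _stext
+#   ffffffff80100040 t local_helper
+#   ffffffff80100800 A __crc_example        (dropped: __crc_ prefix)
+#   ffffffff80101000 U some_external_sym    (dropped: undefined symbol)
+# only the first two lines would be written to System.map.
+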
+$NM -n $1 | grep -v '\( [aUw] \)\|\(__crc_\)\|\( \$[adt]\)' > $2
+
--- /dev/null
+#!/bin/bash
+
+# Cross-Compiler Toolchain for ${PLATFORM}
+# by Martin Decky <martin@decky.cz>
+#
+# GPL'ed, copyleft
+#
+
+
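+# check_error <status> <message>:
+# abort the script with <message> if the previous command failed.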
+check_error() {
+ if [ "$1" -ne "0" ]; then
+ echo
+ echo "Script failed: $2"
+		exit 1
+ fi
+}
+
+BINUTILS_VERSION="2.17"
+GCC_VERSION="4.1.1"
+
+BINUTILS="binutils-${BINUTILS_VERSION}.tar.gz"
+GCC_CORE="gcc-core-${GCC_VERSION}.tar.bz2"
+GCC_CPP="gcc-g++-${GCC_VERSION}.tar.bz2"
+
+BINUTILS_SOURCE="ftp://ftp.gnu.org/gnu/binutils/"
+GCC_SOURCE="ftp://ftp.gnu.org/gnu/gcc/gcc-${GCC_VERSION}/"
+
+PLATFORM="x86_64"
+WORKDIR=`pwd`
+TARGET="${PLATFORM}-linux-gnu"
+PREFIX="/opt/toolchain/${PLATFORM}"
+BINUTILSDIR="${WORKDIR}/binutils-${BINUTILS_VERSION}"
+GCCDIR="${WORKDIR}/gcc-${GCC_VERSION}"
+OBJDIR="${WORKDIR}/gcc-obj"
+
+echo ">>> Downloading tarballs"
+
+if [ ! -f "${BINUTILS}" ]; then
+ wget -c "${BINUTILS_SOURCE}${BINUTILS}"
+ check_error $? "Error downloading binutils."
+fi
+if [ ! -f "${GCC_CORE}" ]; then
+ wget -c "${GCC_SOURCE}${GCC_CORE}"
+ check_error $? "Error downloading GCC Core."
+fi
+if [ ! -f "${GCC_CPP}" ]; then
+ wget -c "${GCC_SOURCE}${GCC_CPP}"
+ check_error $? "Error downloading GCC C++."
+fi
+
+echo ">>> Creating destination directory"
+if [ ! -d "${PREFIX}" ]; then
+ mkdir -p "${PREFIX}"
+ test -d "${PREFIX}"
+ check_error $? "Unable to create ${PREFIX}."
+fi
+
+echo ">>> Creating GCC work directory"
+if [ ! -d "${OBJDIR}" ]; then
+ mkdir -p "${OBJDIR}"
+ test -d "${OBJDIR}"
+ check_error $? "Unable to create ${OBJDIR}."
+fi
+
+echo ">>> Unpacking tarballs"
+tar -xvzf "${BINUTILS}"
+check_error $? "Error unpacking binutils."
+tar -xvjf "${GCC_CORE}"
+check_error $? "Error unpacking GCC Core."
+tar -xvjf "${GCC_CPP}"
+check_error $? "Error unpacking GCC C++."
+
+echo ">>> Compiling and installing binutils"
+cd "${BINUTILSDIR}"
+check_error $? "Change directory failed."
+./configure "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" "--disable-nls"
+check_error $? "Error configuring binutils."
+make all install
+check_error $? "Error compiling/installing binutils."
+
+echo ">>> Compiling and installing GCC"
+cd "${OBJDIR}"
+check_error $? "Change directory failed."
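+# Note: GCC is configured as a bare cross-compiler (no target headers, no
+# shared libraries, C and C++ only), which is sufficient for building
+# freestanding code such as the kernel.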
+"${GCCDIR}/configure" "--target=${TARGET}" "--prefix=${PREFIX}" "--program-prefix=${TARGET}-" --with-gnu-as --with-gnu-ld --disable-nls --disable-threads --enable-languages=c,c++ --disable-multilib --disable-libgcj --without-headers --disable-shared
+check_error $? "Error configuring GCC."
+PATH="${PATH}:${PREFIX}/bin" make all-gcc install-gcc
+check_error $? "Error compiling/installing GCC."
+
+echo
+echo ">>> Cross-compiler for ${TARGET} installed."
--- /dev/null
+# Copyright (c) 2008, Sandia National Laboratories
+
+#
+# The build rules are modelled on the Linux-style quiet build: less
+# verbose output by default, with the full command shown if the V=1
+# flag is set in the environment or on the make command line.
+#
+# Typical usage:
+#	$(call build,FOO $filename,\
+#		foo -flags $filename \
+#	)
+#
+build = \
+ @if [ "$(V)" != 1 ]; then \
+ echo ' $1'; \
+ else \
+ echo "$2"; \
+ fi; \
+ $2
+
+#
+# Generate a library .a file from a list of object files.
+# For consistency of symbol ordering, we do not use ar to do any
+# updates of the library, but instead remove the old one and
+# re-generate it from all of its input object files.
+#
+# Typical usage:
+# $(call buildlib,libfoo.a,$(FILES))
+#
+buildlib = \
+ $(call build,AR $1,\
+ $(RM) $1; \
+ $(AR) crs $1 $2; \
+ )
+
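+#
+# Link a static program from a list of object files.
+#
+# Typical usage:
+#	$(call buildprog,foo,$(OBJS))
+#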
+buildprog = \
+ $(call build,LD $1,\
+ $(RM) $1; \
+ $(CC) -static -o $1 $2; \
+ )
+
+#
+# Build the .o files from the sources.
+#
+%.o: %.c
+ $(call build,CC $@,$(CC) $(CFLAGS) $(INCLUDES) -c -o $@ $<)
+
+all: $(PROGS-y) $(LIBS-y)
+
+clean: FORCE
+ $(call build,CLEAN $(PROGS-y) $(LIBS-y) $(OBJS-y), \
+ $(RM) $(PROGS-y) $(LIBS-y) $(OBJS-y); \
+ )
+
+FORCE:
--- /dev/null
+CFLAGS=-O2
+
+all:
+	$(MAKE) -C liblwk
+	$(MAKE) -C hello_world
+
+clean:
+	$(MAKE) -C liblwk clean
+	$(MAKE) -C hello_world clean
--- /dev/null
+ GNU LIBRARY GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1991 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the library GPL. It is
+ numbered 2 because it goes with version 2 of the ordinary GPL.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Library General Public License, applies to some
+specially designated Free Software Foundation software, and to any
+other libraries whose authors decide to use it. You can use it for
+your libraries, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if
+you distribute copies of the library, or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link a program with the library, you must provide
+complete object files to the recipients so that they can relink them
+with the library, after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ Our method of protecting your rights has two steps: (1) copyright
+the library, and (2) offer you this license which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ Also, for each distributor's protection, we want to make certain
+that everyone understands that there is no warranty for this free
+library. If the library is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original
+version, so that any problems introduced by others will not reflect on
+the original authors' reputations.
+\f
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that companies distributing free
+software will individually obtain patent licenses, thus in effect
+transforming the program into proprietary software. To prevent this,
+we have made it clear that any patent must be licensed for everyone's
+free use or not licensed at all.
+
+ Most GNU software, including some libraries, is covered by the ordinary
+GNU General Public License, which was designed for utility programs. This
+license, the GNU Library General Public License, applies to certain
+designated libraries. This license is quite different from the ordinary
+one; be sure to read it in full, and don't assume that anything in it is
+the same as in the ordinary license.
+
+ The reason we have a separate public license for some libraries is that
+they blur the distinction we usually make between modifying or adding to a
+program and simply using it. Linking a program with a library, without
+changing the library, is in some sense simply using the library, and is
+analogous to running a utility program or application program. However, in
+a textual and legal sense, the linked executable is a combined work, a
+derivative of the original library, and the ordinary General Public License
+treats it as such.
+
+ Because of this blurred distinction, using the ordinary General
+Public License for libraries did not effectively promote software
+sharing, because most developers did not use the libraries. We
+concluded that weaker conditions might promote sharing better.
+
+ However, unrestricted linking of non-free programs would deprive the
+users of those programs of all benefit from the free status of the
+libraries themselves. This Library General Public License is intended to
+permit developers of non-free programs to use free libraries, while
+preserving your freedom as a user of such programs to change the free
+libraries that are incorporated in them. (We have not seen how to achieve
+this as regards changes in header files, but we have achieved it as regards
+changes in the actual functions of the Library.) The hope is that this
+will lead to faster development of free libraries.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, while the latter only
+works together with the library.
+
+ Note that it is possible for a library to be covered by the ordinary
+General Public License rather than by this special one.
+\f
+ GNU LIBRARY GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library which
+contains a notice placed by the copyright holder or other authorized
+party saying it may be distributed under the terms of this Library
+General Public License (also called "this License"). Each licensee is
+addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+\f
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+\f
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+\f
+ 6. As an exception to the Sections above, you may also compile or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ c) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ d) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the source code distributed need not include anything that is normally
+distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+\f
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+\f
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Library General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+\f
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+\f
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+    License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
--- /dev/null
+TARGET = hello_world
+
+PROGS-y += $(TARGET)
+OBJS-y += hello_world.o
+
+INCLUDES += -I../liblwk/include -I../../include
+
+$(TARGET): $(OBJS-y) ../liblwk/liblwk.a
+ $(call buildprog,$@,$(OBJS-y) ../liblwk/liblwk.a)
+
+include ../Make.rules
--- /dev/null
+/* Copyright (c) 2008, Sandia National Laboratories */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <limits.h>
+#include <lwk/liblwk.h>
+
+static void pmem_api_test(void);
+static void aspace_api_test(void);
+
+int
+main(int argc, char *argv[], char *envp[])
+{
+ int i;
+
+ printf("Hello, world!\n");
+
+ printf("Arguments:\n");
+ for (i = 0; i < argc; i++)
+ printf(" argv[%d] = %s\n", i, argv[i]);
+
+ printf("Environment Variables:\n");
+ for (i = 0; envp[i] != NULL; i++)
+ printf(" envp[%d] = %s\n", i, envp[i]);
+
+ pmem_api_test();
+ aspace_api_test();
+
+ printf("Spinning forever...\n");
+ while (1) {}
+}
+
+static void
+pmem_api_test(void)
+{
+ struct pmem_region query, result;
+ unsigned long bytes_umem = 0;
+ int status;
+
+ printf("TEST BEGIN: Physical Memory Management\n");
+
+ query.start = 0;
+ query.end = ULONG_MAX;
+ pmem_region_unset_all(&query);
+
+ printf(" Physical Memory Map:\n");
+ while ((status = pmem_query(&query, &result)) == 0) {
+ printf(" [%#016lx, %#016lx) %-11s\n",
+ result.start,
+ result.end,
+ (result.type_is_set)
+ ? pmem_type_to_string(result.type)
+ : "UNSET"
+ );
+
+ if (result.type == PMEM_TYPE_UMEM)
+ bytes_umem += (result.end - result.start);
+
+ query.start = result.end;
+ }
+
+ if (status != -ENOENT) {
+ printf("ERROR: pmem_query() status=%d\n", status);
+ }
+
+ printf(" Total User-Level Managed Memory: %lu bytes\n", bytes_umem);
+
+ printf("TEST END: Physical Memory Management\n");
+}
+
+static void
+aspace_api_test(void)
+{
+ int status;
+ id_t my_id, new_id;
+
+ printf("TEST BEGIN: Address Space Management\n");
+
+ if ((status = aspace_get_myid(&my_id)) != 0)
+ printf("ERROR: aspace_get_myid() status=%d\n", status);
+ else
+ printf(" My address space ID is %u\n", my_id);
+
+ printf(" Creating a new aspace: ");
+
+ status = aspace_create(ANY_ID, "TEST-ASPACE", &new_id);
+ if (status)
+ printf("\nERROR: aspace_create() status=%d\n", status);
+ else
+ printf("id=%u\n", new_id);
+
+ printf(" Using SMARTMAP to map myself into aspace %u\n", new_id);
+ status = aspace_smartmap(my_id, new_id, SMARTMAP_ALIGN, SMARTMAP_ALIGN);
+ if (status) printf("ERROR: aspace_smartmap() status=%d\n", status);
+
+ aspace_dump2console(new_id);
+
+ status = aspace_unsmartmap(my_id, new_id);
+ if (status) printf("ERROR: aspace_unsmartmap() status=%d\n", status);
+
+	printf("  Destroying aspace %u: ", new_id);
+ status = aspace_destroy(new_id);
+ if (status)
+ printf("ERROR: aspace_destroy() status=%d\n", status);
+ else
+ printf("OK\n");
+
+ printf("TEST END: Address Space Management\n");
+}
--- /dev/null
+ GNU LIBRARY GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1991 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the library GPL. It is
+ numbered 2 because it goes with version 2 of the ordinary GPL.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Library General Public License, applies to some
+specially designated Free Software Foundation software, and to any
+other libraries whose authors decide to use it. You can use it for
+your libraries, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if
+you distribute copies of the library, or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link a program with the library, you must provide
+complete object files to the recipients so that they can relink them
+with the library, after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ Our method of protecting your rights has two steps: (1) copyright
+the library, and (2) offer you this license which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ Also, for each distributor's protection, we want to make certain
+that everyone understands that there is no warranty for this free
+library. If the library is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original
+version, so that any problems introduced by others will not reflect on
+the original authors' reputations.
+\f
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that companies distributing free
+software will individually obtain patent licenses, thus in effect
+transforming the program into proprietary software. To prevent this,
+we have made it clear that any patent must be licensed for everyone's
+free use or not licensed at all.
+
+ Most GNU software, including some libraries, is covered by the ordinary
+GNU General Public License, which was designed for utility programs. This
+license, the GNU Library General Public License, applies to certain
+designated libraries. This license is quite different from the ordinary
+one; be sure to read it in full, and don't assume that anything in it is
+the same as in the ordinary license.
+
+ The reason we have a separate public license for some libraries is that
+they blur the distinction we usually make between modifying or adding to a
+program and simply using it. Linking a program with a library, without
+changing the library, is in some sense simply using the library, and is
+analogous to running a utility program or application program. However, in
+a textual and legal sense, the linked executable is a combined work, a
+derivative of the original library, and the ordinary General Public License
+treats it as such.
+
+ Because of this blurred distinction, using the ordinary General
+Public License for libraries did not effectively promote software
+sharing, because most developers did not use the libraries. We
+concluded that weaker conditions might promote sharing better.
+
+ However, unrestricted linking of non-free programs would deprive the
+users of those programs of all benefit from the free status of the
+libraries themselves. This Library General Public License is intended to
+permit developers of non-free programs to use free libraries, while
+preserving your freedom as a user of such programs to change the free
+libraries that are incorporated in them. (We have not seen how to achieve
+this as regards changes in header files, but we have achieved it as regards
+changes in the actual functions of the Library.) The hope is that this
+will lead to faster development of free libraries.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, while the latter only
+works together with the library.
+
+ Note that it is possible for a library to be covered by the ordinary
+General Public License rather than by this special one.
+\f
+ GNU LIBRARY GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library which
+contains a notice placed by the copyright holder or other authorized
+party saying it may be distributed under the terms of this Library
+General Public License (also called "this License"). Each licensee is
+addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+\f
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+\f
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+\f
+ 6. As an exception to the Sections above, you may also compile or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ c) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ d) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the source code distributed need not include anything that is normally
+distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+\f
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+\f
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Library General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+\f
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+\f
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+    License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
--- /dev/null
+TARGET = liblwk.a
+
+LIBS-y += $(TARGET)
+OBJS-y += syscalls.o pmem.o elf.o aspace.o
+
+INCLUDES += -I./include -I../../include
+
+$(TARGET): $(OBJS-y)
+ $(call buildlib,$@,$(OBJS-y))
+
+include ../Make.rules
--- /dev/null
+/* Copyright (c) 2008, Sandia National Laboratories */
+
+#include <lwk/liblwk.h>
+
+int
+aspace_map_region(
+ id_t id,
+ vaddr_t start,
+ size_t extent,
+ vmflags_t flags,
+ vmpagesize_t pagesz,
+ const char * name,
+ paddr_t pmem
+)
+{
+ int status;
+
+ if ((status = aspace_add_region(id, start, extent, flags, pagesz, name)))
+ return status;
+
+ if ((status = aspace_map_pmem(id, pmem, start, extent))) {
+ aspace_del_region(id, start, extent);
+ return status;
+ }
+
+ return 0;
+}
+
+int
+aspace_map_region_anywhere(
+ id_t id,
+ vaddr_t * start,
+ size_t extent,
+ vmflags_t flags,
+ vmpagesize_t pagesz,
+ const char * name,
+ paddr_t pmem
+)
+{
+ int status;
+
+retry:
+ if ((status = aspace_find_hole(id, 0, extent, pagesz, start)))
+ return status;
+
+ if ((status = aspace_add_region(id, *start, extent, flags, pagesz, name))) {
+ if (status == -ENOTUNIQ)
+ goto retry; /* we lost a race with someone */
+ return status;
+ }
+
+ if ((status = aspace_map_pmem(id, pmem, *start, extent))) {
+ aspace_del_region(id, *start, extent);
+ return status;
+ }
+
+ return 0;
+}
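+
+/*
+ * Illustrative sketch, not part of liblwk: one way a caller might use
+ * aspace_map_region_anywhere() to map previously allocated physical memory
+ * into its own address space. The variable names, the 4 KB page size, and
+ * the "scratch" region name are hypothetical; pmem is assumed to hold the
+ * physical start address of memory obtained from the pmem API.
+ *
+ *	vaddr_t va;
+ *	id_t my_id;
+ *	int status;
+ *
+ *	aspace_get_myid(&my_id);
+ *	status = aspace_map_region_anywhere(my_id, &va, 2 * 4096,
+ *	                                    VM_USER | VM_READ | VM_WRITE,
+ *	                                    4096, "scratch", pmem);
+ */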
--- /dev/null
+/* Copyright (c) 2008, Sandia National Laboratories */
+
+#include <lwk/liblwk.h>
+#include <lwk/ctype.h>
+
+/**
+ * Verifies that an ELF header is sane.
+ * Returns 0 if the header is sane and -1 if it is not.
+ */
+int
+elf_check_hdr(const struct elfhdr *hdr)
+{
+ if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0) {
+ print(TYPE_ERR "bad e_ident %#x\n",
+ *((unsigned int *)hdr->e_ident));
+ return -1;
+ }
+ if (hdr->e_ident[EI_CLASS] != ELF_CLASS) {
+ print(TYPE_ERR "bad e_ident[EI_CLASS] %#x\n",
+ (unsigned int)hdr->e_ident[EI_CLASS]);
+ return -1;
+ }
+ if (hdr->e_ident[EI_DATA] != ELF_DATA) {
+ print(TYPE_ERR "bad e_ident[EI_DATA] %#x\n",
+ (unsigned int)hdr->e_ident[EI_DATA]);
+ return -1;
+ }
+ if (hdr->e_ident[EI_VERSION] != EV_CURRENT) {
+ print(TYPE_ERR "bad e_ident[EI_VERSION] %#x\n",
+ (unsigned int)hdr->e_ident[EI_VERSION]);
+ return -1;
+ }
+ if (hdr->e_ident[EI_OSABI] != ELF_OSABI) {
+		print(TYPE_ERR "bad e_ident[EI_OSABI] %#x\n",
+ (unsigned int)hdr->e_ident[EI_OSABI]);
+ return -1;
+ }
+ if (hdr->e_type != ET_EXEC) {
+ print(TYPE_ERR "bad e_type %#x\n",
+ (unsigned int)hdr->e_type);
+ return -1;
+ }
+ if (hdr->e_machine != ELF_ARCH) {
+ print(TYPE_ERR "bad e_machine %#x\n",
+ (unsigned int)hdr->e_machine);
+ return -1;
+ }
+ if (hdr->e_version != EV_CURRENT) {
+ print(TYPE_ERR "bad e_version %#x\n",
+ (unsigned int)hdr->e_version);
+ return -1;
+ }
+ if (hdr->e_flags != 0) {
+ print(TYPE_ERR "bad e_flags %#x\n",
+ (unsigned int)hdr->e_flags);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Prints the contents of an ELF file header to the console.
+ */
+void
+elf_print_elfhdr(const struct elfhdr *hdr)
+{
+ print(TYPE_NORM "ELF File Header:\n");
+ print(TYPE_NORM " type %0#10x\n", (unsigned int) hdr->e_type );
+ print(TYPE_NORM " machine %0#10x\n", (unsigned int) hdr->e_machine );
+ print(TYPE_NORM " version %0#10x\n", (unsigned int) hdr->e_version );
+ print(TYPE_NORM " entry %0#18lx\n", (unsigned long) hdr->e_entry );
+ print(TYPE_NORM " phoff %0#18lx\n", (unsigned long) hdr->e_phoff );
+ print(TYPE_NORM " shoff %0#18lx\n", (unsigned long) hdr->e_shoff );
+ print(TYPE_NORM " flags %0#10x\n", (unsigned int) hdr->e_flags );
+ print(TYPE_NORM " ehsize %0#10x\n", (unsigned int) hdr->e_ehsize );
+ print(TYPE_NORM " phentsize %0#10x\n", (unsigned int) hdr->e_phentsize );
+ print(TYPE_NORM " phnum %0#10x\n", (unsigned int) hdr->e_phnum );
+ print(TYPE_NORM " shentsize %0#10x\n", (unsigned int) hdr->e_shentsize );
+ print(TYPE_NORM " shnum %0#10x\n", (unsigned int) hdr->e_shnum );
+ print(TYPE_NORM " shstrndx %0#10x\n", (unsigned int) hdr->e_shstrndx );
+}
+
+/**
+ * Prints the contents of an ELF program header to the console.
+ */
+void
+elf_print_phdr(const struct elf_phdr *hdr)
+{
+ char *name;
+
+ switch (hdr->p_type) {
+ case PT_NULL: name = "NULL"; break;
+ case PT_LOAD: name = "LOAD"; break;
+ case PT_DYNAMIC: name = "DYNAMIC"; break;
+ case PT_INTERP: name = "INTERP"; break;
+ case PT_NOTE: name = "NOTE"; break;
+ case PT_SHLIB: name = "SHLIB"; break;
+ case PT_PHDR: name = "PHDR"; break;
+ case PT_LOPROC: name = "LOPROC"; break;
+ case PT_HIPROC: name = "HIPROC"; break;
+ default: name = "UNDEFINED TYPE";
+ }
+
+ print(TYPE_NORM "ELF Program Segment Header:\n");
+ print(TYPE_NORM " type %s\n", name);
+ print(TYPE_NORM " flags %0#10x\n", (unsigned int) hdr->p_flags );
+ print(TYPE_NORM " offset %0#18lx\n", (unsigned long) hdr->p_offset );
+ print(TYPE_NORM " vaddr %0#18lx\n", (unsigned long) hdr->p_vaddr );
+ print(TYPE_NORM " paddr %0#18lx\n", (unsigned long) hdr->p_paddr );
+ print(TYPE_NORM " filesz %0#18lx\n", (unsigned long) hdr->p_filesz );
+ print(TYPE_NORM " memsz %0#18lx\n", (unsigned long) hdr->p_memsz );
+ print(TYPE_NORM " align %0#18lx\n", (unsigned long) hdr->p_align );
+}
+
+/**
+ * Converts ELF flags to the corresponding kernel memory subsystem flags.
+ */
+vmflags_t
+elf_pflags_to_vmflags(unsigned int elf_pflags)
+{
+ vmflags_t vmflags = VM_USER;
+ if ( elf_pflags & PF_R ) vmflags |= VM_READ;
+ if ( elf_pflags & PF_W ) vmflags |= VM_WRITE;
+ if ( elf_pflags & PF_X ) vmflags |= VM_EXEC;
+ return vmflags;
+}
+
+/**
+ * Determines an ELF executable's entry point, i.e., where execution starts.
+ * Note: The address returned is in the context of the executing ELF
+ * image, not an address within the passed in elf_image.
+ */
+vaddr_t
+elf_entry_point(const void *elf_image)
+{
+ const struct elfhdr *ehdr = elf_image;
+ return ehdr->e_entry;
+}
+
+/**
+ * Determines the address of an ELF executable's program header table.
+ * Note: The address returned is in the context of the executing ELF
+ * image, not an address within the passed in elf_image.
+ */
+vaddr_t
+elf_phdr_table_addr(const void *elf_image)
+{
+ const struct elfhdr *ehdr = elf_image;
+ struct elf_phdr *phdr_array, *phdr;
+ unsigned int i;
+
+ phdr_array = (struct elf_phdr *)(elf_image + ehdr->e_phoff);
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdr_array[i];
+ if (phdr->p_type == PT_LOAD)
+ return phdr->p_vaddr - phdr->p_offset + ehdr->e_phoff;
+ }
+ return 0;
+}
+
+/**
+ * Returns the number of entries in an ELF executable's program header table.
+ */
+unsigned int
+elf_num_phdrs(const void *elf_image)
+{
+ const struct elfhdr *ehdr = elf_image;
+ return ehdr->e_phnum;
+}
+
+/**
+ * Determines where the UNIX heap should start for a given ELF executable.
+ * Note: The address returned is in the context of the executing ELF
+ * image, not an address relative to the passed in elf_image.
+ */
+vaddr_t
+elf_heap_start(const void *elf_image)
+{
+ const struct elfhdr *ehdr;
+ const struct elf_phdr *phdr_array;
+ const struct elf_phdr *phdr;
+ vaddr_t end, heap_start=0;
+ size_t i;
+
+ /* Locate the program header array (in this context) */
+ ehdr = elf_image;
+ phdr_array = (struct elf_phdr *)(elf_image + ehdr->e_phoff);
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdr_array[i];
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ /* Calculate the end of the LOAD segment in memory */
+ end = phdr->p_vaddr + phdr->p_memsz;
+
+ if (end > heap_start)
+ heap_start = end;
+ }
+
+ return heap_start;
+}
+
+/**
+ * Given an argument string like "arg1=foo arg2=bar", parses it into
+ * an argv[] or envp[] style array of string pointers. Useful for
+ * constructing the argv and envp arguments to elf_init_stack().
+ */
+int
+elf_init_str_array(
+ size_t size,
+ char * ptrs[],
+ char * str
+)
+{
+ size_t pos = 0;
+ char *tmp;
+
+ while (strlen(str)) {
+ /* move past white space */
+ while (*str && isspace(*str))
+ ++str;
+
+ tmp = str;
+
+ /* find the end of the string */
+ while (*str && !isspace(*str))
+ ++str;
+
+ *str++ = 0;
+
+ if (strlen(tmp)) {
+ if (pos == size - 1)
+ return -1;
+ ptrs[pos++] = tmp;
+ }
+
+ }
+ ptrs[pos] = "";
+ return 0;
+}
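+
+/*
+ * Illustrative use (hypothetical buffer and sizes): parse a writable
+ * argument string into an argv[]-style array. The buffer is deliberately
+ * oversized so the bytes following the string are zero, and one array slot
+ * is left for the empty-string terminator that elf_init_stack() expects.
+ *
+ *	char args[64] = "arg1=foo arg2=bar";
+ *	char *argv[4];
+ *
+ *	elf_init_str_array(4, argv, args);
+ *
+ * On success (return value 0), argv[0] is "arg1=foo", argv[1] is "arg2=bar",
+ * and argv[2] is the empty-string terminator.
+ */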
+
+/**
+ * Writes an auxiliary info table entry.
+ */
+static void
+write_aux(
+ struct aux_ent * table,
+ int index,
+ unsigned long id,
+ unsigned long val
+)
+{
+ table[index].id = id;
+ table[index].val = val;
+}
+
+/**
+ * Determines the value of the current stack pointer in the target
+ * address space. The sp argument is the stack pointer in this context
+ * (i.e., the context this code is executing in), stack_mapping is the
+ * address of the stack in this context, stack_start is the address
+ * of the stack in the target address space, and extent is the size
+ * of the stack.
+ */
+static vaddr_t
+sp_in_aspace(void *sp,
+ void *stack_mapping, vaddr_t stack_start, size_t extent)
+{
+ vaddr_t stack_end = (vaddr_t)stack_mapping + extent;
+ vaddr_t stack_vend = stack_start + extent;
+ size_t stack_offset = stack_end - (vaddr_t)sp;
+
+ return stack_vend - stack_offset;
+}
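+
+/*
+ * Worked example with hypothetical numbers: if the stack is mapped locally
+ * at 0x10000, lives at 0x7fff0000 in the target aspace, and is 0x1000 bytes
+ * long, then a local sp of 0x10800 sits 0x800 bytes below the top of the
+ * local mapping, so the translated value is 0x7fff1000 - 0x800 = 0x7fff0800.
+ */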
+
+/**
+ * Sets up the initial stack for a new task. This includes storing the
+ * argv[] argument array, envp[] environment array, and auxiliary info table
+ * to the top of the user stack in the format that the C library expects them.
+ * Eventually the arguments get passed to the application's
+ * main(argc, argv, envp) function.
+ *
+ * If successful, the initial stack pointer value that should be used when
+ * starting the new task is returned in *stack_ptr.
+ *
+ * This function sets up the initial stack as follows (stack grows down):
+ *
+ * Environment Strings
+ * Argument Strings
+ * Platform String
+ * Auxiliary Info Table
+ * envp[]
+ * argv[]
+ * argc
+ *
+ * Arguments:
+ * [IN] elf_image The ELF executable, needed to setup aux info.
+ * [IN] stack_mapping Where the stack is mapped in this context.
+ * [IN] stack_start Where the stack is located in the target aspace.
+ * [IN] stack_extent Size of the stack.
+ * [IN] argv[] Array of pointers to argument strings.
+ * [IN] envp[] Array of pointers to environment strings.
+ * [IN] uid User ID of the task
+ * [IN] gid Group ID of the task
+ * [IN] hwcap Hardware capability bitfield
+ * (used for AT_HWCAP entry in aux info table)
+ * [OUT] stack_ptr The initial stack pointer value for the new task.
+ * (note this is an address in the target aspace)
+ *
+ * Returns:
+ * Success: 0
+ * Failure: Error Code, stack may have been partially initialized
+ */
+int
+elf_init_stack(
+ void * elf_image,
+ void * stack_mapping,
+ vaddr_t stack_start,
+ size_t stack_extent,
+ char * argv[],
+ char * envp[],
+ uid_t uid,
+ gid_t gid,
+ uint32_t hwcap,
+ vaddr_t * stack_ptr
+)
+{
+ size_t i, len;
+ uintptr_t sp;
+ const char *platform_str = ELF_PLATFORM;
+ struct aux_ent auxv[AT_ENTRIES];
+ size_t argc=0, envc=0, auxc=0;
+ size_t arg_len=0, env_len=0, auxv_len=0;
+ size_t platform_str_len=0;
+
+ char *strings_sp;
+ char *platform_str_sp;
+ unsigned long *auxv_sp;
+ unsigned long *envp_sp;
+ unsigned long *argv_sp;
+ unsigned long *argc_sp;
+
+ /* Count # of arguments and their total string length */
+ while ((len = strlen(argv[argc])) != 0) {
+ arg_len += (len + 1);
+ ++argc;
+ }
+
+ /* Count # of environment variables and their total string length */
+ while ((len = strlen(envp[envc])) != 0) {
+ env_len += (len + 1);
+ ++envc;
+ }
+
+ /* Calculate length of the arch's platform string, if there is one */
+ if (platform_str)
+ platform_str_len = strlen(platform_str) + 1;
+
+ /* Make room on stack for arg, env, and platform strings */
+ sp = (uintptr_t)((vaddr_t)stack_mapping + stack_extent);
+ sp -= (arg_len + env_len + platform_str_len);
+ strings_sp = (void *) sp;
+
+	/* Build the auxiliary information table */
+ write_aux(auxv, auxc++, AT_HWCAP, hwcap);
+ write_aux(auxv, auxc++, AT_PAGESZ, ELF_EXEC_PAGESIZE);
+ write_aux(auxv, auxc++, AT_CLKTCK, 1000000l);
+ write_aux(auxv, auxc++, AT_PHDR, elf_phdr_table_addr(elf_image));
+ write_aux(auxv, auxc++, AT_PHENT, sizeof(struct elf_phdr));
+ write_aux(auxv, auxc++, AT_PHNUM, elf_num_phdrs(elf_image));
+ write_aux(auxv, auxc++, AT_BASE, 0);
+ write_aux(auxv, auxc++, AT_FLAGS, 0);
+ write_aux(auxv, auxc++, AT_ENTRY, elf_entry_point(elf_image));
+ write_aux(auxv, auxc++, AT_UID, uid);
+ write_aux(auxv, auxc++, AT_EUID, uid);
+ write_aux(auxv, auxc++, AT_GID, gid);
+ write_aux(auxv, auxc++, AT_EGID, gid);
+ write_aux(auxv, auxc++, AT_SECURE, 0);
+ if (platform_str) {
+ platform_str_sp = strings_sp;
+ write_aux(
+ auxv, auxc++, AT_PLATFORM,
+ sp_in_aspace(platform_str_sp,
+ stack_mapping, stack_start,
+ stack_extent)
+ );
+ }
+ write_aux(auxv, auxc++, AT_NULL, 0);
+
+ /* Make room on stack for aux info table */
+ auxv_len = auxc * sizeof(struct aux_ent);
+ sp -= auxv_len;
+
+ /* Make room on stack for argc, argv[], envp[] */
+ sp -= ((1 + (argc + 1) + (envc + 1)) * sizeof(unsigned long));
+
+ /* Align stack to 16-byte boundary */
+ sp = round_down(sp, 16);
+
+ /* Calculate stack address to store argc, argv[], envp[], and auxv[] */
+ argc_sp = (unsigned long *) sp;
+ argv_sp = argc_sp + 1;
+ envp_sp = argv_sp + argc + 1;
+ auxv_sp = envp_sp + envc + 1;
+
+ /* Store arch's platform string, if there is one */
+ if (platform_str) {
+ memcpy(strings_sp, platform_str, platform_str_len);
+ strings_sp += platform_str_len;
+ }
+
+ /* Store the auxiliary information array */
+ memcpy(auxv_sp, auxv, auxv_len);
+
+ /* Store argv[] */
+ for (i = 0; i < argc; i++) {
+ len = strlen(argv[i]) + 1;
+ memcpy(strings_sp, argv[i], len);
+		argv_sp[i] = sp_in_aspace(strings_sp,
+		                          stack_mapping, stack_start,
+		                          stack_extent);
+		strings_sp += len;
+ }
+ argv_sp[i] = 0; /* NULL terminate argv[] */
+
+ /* Store envp[] */
+ for (i = 0; i < envc; i++) {
+ len = strlen(envp[i]) + 1;
+ memcpy(strings_sp, envp[i], len);
+		envp_sp[i] = sp_in_aspace(strings_sp,
+		                          stack_mapping, stack_start,
+		                          stack_extent);
+		strings_sp += len;
+ }
+	envp_sp[i] = 0;	/* NULL terminate envp[] */
+
+ /* Store argc */
+ *argc_sp = argc;
+
+ if (stack_ptr) {
+ *stack_ptr = sp_in_aspace((void *)sp,
+ stack_mapping, stack_start,
+ stack_extent);
+ }
+ return 0;
+}
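+
+/*
+ * Illustrative call sequence (hypothetical variables; the caller must have
+ * already mapped the new task's stack at stack_mapping in this aspace and
+ * at stack_start in the target aspace, and argv_str/envp_str are writable
+ * strings of the form parsed by elf_init_str_array() above):
+ *
+ *	char *argv[16], *envp[16];
+ *	vaddr_t stack_ptr;
+ *
+ *	elf_init_str_array(16, argv, argv_str);
+ *	elf_init_str_array(16, envp, envp_str);
+ *	status = elf_init_stack(elf_image, stack_mapping, stack_start,
+ *	                        stack_extent, argv, envp, uid, gid,
+ *	                        hwcap, &stack_ptr);
+ */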
+
+/**
+ * A "default" alloc_pmem() function for use with elf_load_executable().
+ * A user may wish to define a custom replacement alloc_pmem() function
+ * to, for example, keep track of the physical memory that is allocated.
+ */
+paddr_t
+elf_dflt_alloc_pmem(size_t size, size_t alignment, uintptr_t arg)
+{
+ struct pmem_region result;
+
+ if (pmem_alloc_umem(size, alignment, &result))
+ return 0;
+
+ /* Mark the memory as being used by the init task */
+ result.type = PMEM_TYPE_INIT_TASK;
+ BUG_ON(pmem_update(&result));
+
+ return result.start;
+}
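+
+/*
+ * Hypothetical sketch of the custom alloc_pmem() replacement mentioned
+ * above: a wrapper that records each allocation before returning it. The
+ * function name and bookkeeping variables are illustrative, not part of
+ * liblwk.
+ *
+ *	static paddr_t last_alloc_start;
+ *	static size_t  last_alloc_size;
+ *
+ *	paddr_t my_alloc_pmem(size_t size, size_t alignment, uintptr_t arg)
+ *	{
+ *		paddr_t paddr = elf_dflt_alloc_pmem(size, alignment, arg);
+ *		if (paddr) {
+ *			last_alloc_start = paddr;
+ *			last_alloc_size  = size;
+ *		}
+ *		return paddr;
+ *	}
+ *
+ * A pointer to such a function can then be passed to elf_load_executable().
+ */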
+
+static int
+load_writable_segment(
+ void * elf_image,
+ struct elf_phdr * phdr,
+ id_t aspace_id,
+ vaddr_t start,
+ size_t extent,
+ vmpagesize_t pagesz,
+ uintptr_t alloc_pmem_arg,
+ paddr_t (*alloc_pmem)(size_t size, size_t alignment, uintptr_t arg)
+)
+{
+ int status;
+ paddr_t pmem;
+ vaddr_t local_start;
+ vaddr_t src, dst;
+ id_t my_aspace_id;
+
+ /* Figure out my address space ID */
+ if ((status = aspace_get_myid(&my_aspace_id)))
+ return status;
+
+ /* Allocate physical memory for the segment */
+ if (!(pmem = alloc_pmem(extent, pagesz, alloc_pmem_arg)))
+ return -ENOMEM;
+
+ /* Map the segment into the target address space */
+ status =
+ aspace_map_region(
+ aspace_id,
+ start,
+ extent,
+ elf_pflags_to_vmflags(phdr->p_flags),
+ pagesz,
+ "ELF",
+ pmem
+ );
+ if (status)
+ return status;
+
+ /* Map the segment into this address space */
+ status =
+ aspace_map_region_anywhere(
+ my_aspace_id,
+ &local_start,
+ extent,
+ (VM_USER|VM_READ|VM_WRITE),
+ pagesz,
+ "temporary",
+ pmem
+ );
+ if (status)
+ return status;
+
+ /* Copy segment data from ELF image into the target address space
+ * (via its temporary mapping in our address space) */
+ dst = local_start + (phdr->p_vaddr - start);
+ src = (vaddr_t)elf_image + phdr->p_offset;
+ memcpy((void *)dst, (void *)src, phdr->p_filesz);
+
+ /* Unmap the segment from this address space */
+ status = aspace_del_region(my_aspace_id, local_start, extent);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static int
+load_readonly_segment(
+ paddr_t elf_image_paddr,
+ struct elf_phdr * phdr,
+ id_t aspace_id,
+ vaddr_t start,
+ size_t extent,
+ vmpagesize_t pagesz
+)
+{
+ return aspace_map_region(
+ aspace_id,
+ start,
+ extent,
+ elf_pflags_to_vmflags(phdr->p_flags),
+ pagesz,
+ "ELF (mapped)",
+ elf_image_paddr +
+ round_down(phdr->p_offset, pagesz)
+ );
+}
+
+/**
+ * Loads an ELF executable image into the specified address space.
+ *
+ * Arguments:
+ * [IN] elf_image: Location of ELF image in this address space.
+ * [IN] elf_image_paddr: Location of ELF image in physical memory.
+ * [IN] aspace_id: Address space to load ELF image into.
+ * [IN] pagesz: Page size to use when mapping ELF image.
+ * [IN] alloc_pmem_arg: Argument to pass to alloc_pmem().
+ * [IN] alloc_pmem: Function pointer to use to allocate physical
+ * memory for the region. alloc_pmem() returns
+ * the physical address of the memory allocated.
+ *
+ * Returns:
+ * Success: 0
+ * Failure: Error Code, the target address space is left in an
+ * undefined state and should be destroyed.
+ */
+int
+elf_load_executable(
+ void * elf_image,
+ paddr_t elf_image_paddr,
+ id_t aspace_id,
+ vmpagesize_t pagesz,
+ uintptr_t alloc_pmem_arg,
+ paddr_t (*alloc_pmem)(size_t size, size_t alignment, uintptr_t arg)
+)
+{
+ struct elfhdr * ehdr;
+ struct elf_phdr * phdr_array;
+ struct elf_phdr * phdr;
+ size_t i;
+ vaddr_t start, end;
+ size_t extent;
+ size_t num_load_segments=0;
+ int status;
+
+ /* Locate the program header array (in this context) */
+ ehdr = elf_image;
+ phdr_array = (struct elf_phdr *)(elf_image + ehdr->e_phoff);
+
+ /* Set up a region for each program segment */
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdr_array[i];
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ /* Calculate the segment's bounds */
+ start = round_down(phdr->p_vaddr, pagesz);
+ end = round_up(phdr->p_vaddr + phdr->p_memsz, pagesz);
+ extent = end - start;
+
+ if (phdr->p_flags & PF_W) {
+ /* Writable segments must be copied into the
+ * target address space */
+ status =
+ load_writable_segment(
+ elf_image,
+ phdr,
+ aspace_id,
+ start,
+ extent,
+ pagesz,
+ alloc_pmem_arg,
+ alloc_pmem
+ );
+ if (status)
+ return status;
+ } else {
+ /* Read-only segments are mapped directly
+ * from the ELF image */
+ status =
+ load_readonly_segment(
+ elf_image_paddr,
+ phdr,
+ aspace_id,
+ start,
+ extent,
+ pagesz
+ );
+ if (status)
+ return status;
+ }
+
+ ++num_load_segments;
+ }
+
+ return (num_load_segments) ? 0 : -ENOENT;
+}
+
+static int
+make_region(
+ id_t aspace_id,
+ vaddr_t start,
+ size_t extent,
+ vmflags_t flags,
+ vmpagesize_t pagesz,
+ const char * name,
+ uintptr_t alloc_pmem_arg,
+ paddr_t (*alloc_pmem)(size_t size, size_t alignment, uintptr_t arg),
+ paddr_t * pmem
+)
+{
+ int status;
+
+ *pmem = alloc_pmem(extent, pagesz, alloc_pmem_arg);
+ if (*pmem == 0) {
+ print("Failed to allocate physical memory for %s.", name);
+ return -ENOMEM;
+ }
+
+ status = aspace_map_region(aspace_id, start, extent,
+ flags, pagesz,
+ name, *pmem);
+ if (status) {
+ print("Failed to map physical memory for %s (status=%d).",
+ name, status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * Maximum number of arguments and environment variables that may
+ * be passed to the new task created by elf_load().
+ */
+#define MAX_ARGC 32
+#define MAX_ENVC 32
+
+/**
+ * Kitchen-sink ELF image load function.
+ * If something more custom is desired, this function can be used as a guide
+ * for what needs to be done to load an ELF executable and set up the
+ * accompanying address space.
+ */
+int
+elf_load(
+ void * elf_image,
+ paddr_t elf_image_paddr,
+ const char * name,
+ id_t desired_aspace_id,
+ vmpagesize_t pagesz,
+ size_t heap_size,
+ size_t stack_size,
+ char * argv_str,
+ char * envp_str,
+ start_state_t * start_state,
+ uintptr_t alloc_pmem_arg,
+ paddr_t (*alloc_pmem)(size_t size, size_t alignment, uintptr_t arg)
+)
+{
+ int status;
+ char *argv[MAX_ARGC] = { (char *)name };
+ char *envp[MAX_ENVC];
+ id_t my_aspace_id, aspace_id;
+ vaddr_t heap_start, stack_start, stack_end, stack_ptr;
+ vaddr_t local_stack_start;
+ size_t heap_extent, stack_extent;
+ paddr_t heap_pmem, stack_pmem;
+ uint32_t hwcap;
+
+ if (!elf_image || !start_state || !alloc_pmem)
+ return -EINVAL;
+
+ if (elf_init_str_array(MAX_ARGC-1, argv+1, argv_str)) {
+ print("Too many ARGV strings.");
+ return -EINVAL;
+ }
+
+ if (elf_init_str_array(MAX_ENVC, envp, envp_str)) {
+ print("Too many ENVP strings.");
+ return -EINVAL;
+ }
+
+ if ((status = aspace_create(desired_aspace_id, "init_task", &aspace_id))) {
+ print("Failed to create aspace (status=%d).", status);
+ return status;
+ }
+
+ /* Load the ELF executable's LOAD segments */
+ status =
+ elf_load_executable(
+ elf_image, /* where I can access the ELF image */
+ elf_image_paddr, /* where it is in physical memory */
+ aspace_id, /* the address space to map it into */
+ pagesz, /* page size to map it with */
+ alloc_pmem_arg, /* arg to pass to alloc_pmem */
+ alloc_pmem /* func to use to allocate phys mem */
+ );
+ if (status) {
+ print("Failed to load ELF image (status=%d).", status);
+ return status;
+ }
+
+ /* Create the UNIX heap */
+ heap_start = round_up(elf_heap_start(elf_image), pagesz);
+ heap_extent = round_up(heap_size, pagesz);
+ status =
+ make_region(
+ aspace_id,
+ heap_start,
+ heap_extent,
+ (VM_USER|VM_READ|VM_WRITE|VM_EXEC|VM_HEAP),
+ pagesz,
+ "heap",
+ alloc_pmem_arg,
+ alloc_pmem,
+ &heap_pmem
+ );
+ if (status) {
+ print("Failed to create heap (status=%d).", status);
+ return status;
+ }
+
+ /* Create the stack region */
+ stack_end = SMARTMAP_ALIGN;
+ stack_start = round_down(stack_end - stack_size, pagesz);
+ stack_extent = stack_end - stack_start;
+ status =
+ make_region(
+ aspace_id,
+ stack_start,
+ stack_extent,
+ (VM_USER|VM_READ|VM_WRITE|VM_EXEC),
+ pagesz,
+ "stack",
+ alloc_pmem_arg,
+ alloc_pmem,
+ &stack_pmem
+ );
+ if (status) {
+ print("Failed to create stack (status=%d).", status);
+ return status;
+ }
+
+ /* Map the stack region into this address space */
+ if ((status = aspace_get_myid(&my_aspace_id)))
+ return status;
+ status =
+ aspace_map_region_anywhere(
+ my_aspace_id,
+ &local_stack_start,
+ stack_extent,
+ (VM_USER|VM_READ|VM_WRITE),
+ pagesz,
+ "temporary",
+ stack_pmem
+ );
+ if (status) {
+ print("Failed to map stack locally (status=%d).", status);
+ return status;
+ }
+
+ /* Initialize the stack */
+ status = elf_hwcap(start_state->cpu_id, &hwcap);
+ if (status) {
+ print("Failed to get hw capabilities (status=%d).", status);
+ return status;
+ }
+ status =
+ elf_init_stack(
+ elf_image,
+ (void *)local_stack_start, /* Where I can access it */
+ stack_start, /* Where it is in target aspace */
+ stack_extent,
+ argv, envp,
+ start_state->uid, start_state->gid,
+ hwcap,
+ &stack_ptr
+ );
+ if (status) {
+ print("Failed to initialize stack (status=%d).", status);
+ return status;
+ }
+
+ start_state->aspace_id = aspace_id;
+ start_state->entry_point = elf_entry_point(elf_image);
+ start_state->stack_ptr = stack_ptr;
+
+ return 0;
+}
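+
+/*
+ * Hedged usage sketch (added for exposition, not in the original file):
+ * one plausible way a caller could combine elf_load() with task_create()
+ * to start the loaded program. The sizes, argv/envp strings, and the
+ * ANY_ID and VM_PAGE_4KB constants are placeholder assumptions; substitute
+ * whatever your environment actually provides.
+ */
+static int
+example_launch(void * elf_image, paddr_t elf_image_paddr)
+{
+ start_state_t state;
+ id_t task_id;
+ int status;
+
+ /* CPU and credentials for the new task; elf_load() reads these */
+ state.cpu_id = 0;
+ state.uid = 0;
+ state.gid = 0;
+
+ status = elf_load(
+ elf_image, /* where we can access the ELF image */
+ elf_image_paddr, /* where it lives in physical memory */
+ "example_task", /* task name, becomes argv[0] */
+ ANY_ID, /* let the kernel pick the aspace ID */
+ VM_PAGE_4KB, /* page size to map with */
+ 1024 * 1024, /* heap size */
+ 256 * 1024, /* stack size */
+ "arg1 arg2", /* placeholder argv string */
+ "KEY=value", /* placeholder envp string */
+ &state,
+ 0, /* alloc_pmem arg */
+ elf_dflt_alloc_pmem
+ );
+ if (status)
+ return status;
+
+ /* elf_load() filled in aspace_id, entry_point, and stack_ptr */
+ return task_create(ANY_ID, "example_task", &state, &task_id);
+}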
--- /dev/null
+/* Copyright (c) 2008, Sandia National Laboratories */
+
+#ifndef _LIBLWK_H_
+#define _LIBLWK_H_
+
+#include <lwk/types.h>
+#include <lwk/errno.h>
+#include <lwk/print.h>
+#include <lwk/string.h>
+#include <lwk/pmem.h>
+#include <lwk/aspace.h>
+#include <lwk/task.h>
+#include <lwk/elf.h>
+#include <lwk/auxvec.h>
+
+#endif
--- /dev/null
+/* Copyright (c) 2008, Sandia National Laboratories */
+
+#include <lwk/liblwk.h>
+
+const char *
+pmem_type_to_string(pmem_type_t type)
+{
+ switch(type) {
+ case PMEM_TYPE_BOOTMEM: return "BOOTMEM";
+ case PMEM_TYPE_BIGPHYSAREA: return "BIGPHYSAREA";
+ case PMEM_TYPE_INITRD: return "INITRD";
+ case PMEM_TYPE_INIT_TASK: return "INIT_TASK";
+ case PMEM_TYPE_KMEM: return "KMEM";
+ case PMEM_TYPE_UMEM: return "UMEM";
+ }
+ return "UNKNOWN";
+}
+
+void
+pmem_region_unset_all(struct pmem_region *rgn)
+{
+ rgn->type_is_set = false;
+ rgn->lgroup_is_set = false;
+ rgn->allocated_is_set = false;
+ rgn->name_is_set = false;
+}
+
+int
+pmem_alloc_umem(size_t size, size_t alignment, struct pmem_region *rgn)
+{
+ struct pmem_region constraint, result;
+
+ /* Find and allocate a chunk of PMEM_TYPE_UMEM physical memory */
+ pmem_region_unset_all(&constraint);
+ constraint.start = 0;
+ constraint.end = (paddr_t)(-1);
+ constraint.type = PMEM_TYPE_UMEM; constraint.type_is_set = true;
+ constraint.allocated = false; constraint.allocated_is_set = true;
+
+ if (pmem_alloc(size, alignment, &constraint, &result))
+ return -ENOMEM;
+
+ *rgn = result;
+ return 0;
+}
+
+bool
+pmem_is_umem(paddr_t start, size_t extent)
+{
+ struct pmem_region query, result;
+ int status;
+
+ pmem_region_unset_all(&query);
+ query.start = start;
+ query.end = start + extent;
+ query.type = PMEM_TYPE_UMEM; query.type_is_set = true;
+ result.end = 0;
+
+ while ((status = pmem_query(&query, &result)) == 0) {
+ if (result.start != query.start)
+ return false;
+ if (result.end == query.end)
+ break;
+ query.start = result.end;
+ }
+ return (status) ? false : true;
+}
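+
+/*
+ * Brief usage sketch (illustrative, not part of the original file):
+ * allocate one page of user memory with pmem_alloc_umem() and sanity
+ * check it with pmem_is_umem(). The 4096-byte size and alignment are
+ * assumptions chosen for the example.
+ */
+static int
+example_grab_page(paddr_t *page_out)
+{
+ struct pmem_region rgn;
+
+ if (pmem_alloc_umem(4096, 4096, &rgn))
+ return -ENOMEM;
+
+ /* The region handed back should itself be PMEM_TYPE_UMEM */
+ if (!pmem_is_umem(rgn.start, rgn.end - rgn.start))
+ return -EINVAL;
+
+ *page_out = rgn.start;
+ return 0;
+}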
--- /dev/null
+/* Copyright (c) 2008, Sandia National Laboratories */
+
+#include <lwk/unistd.h>
+#include <lwk/liblwk.h>
+
+/**
+ * There is no way to specify inline assembly constraints for %r10 (arg4),
+ * %r8 (arg5), and %r9 (arg6), so the macros below specify the registers
+ * to use for local variables as a work-around.
+ *
+ * GCC BUG? -- For some unknown reason, the register specified to store
+ * a local variable is not always honored if the variable
+ * is assigned when it is declared. Work around this by
+ * declaring and assigning on separate lines.
+ */
+#define SYSCALL0(name) \
+int name(void) { \
+ int status; \
+ asm volatile( \
+ "syscall" \
+ : "=a" (status) \
+ : "0" (__NR_##name) \
+ : "memory", "rcx", "r11", "cc" \
+ ); \
+ return status; \
+}
+
+#define SYSCALL1(name, type1) \
+int name(type1 arg1) { \
+ int status; \
+ register type1 rdi asm("rdi"); \
+ rdi = arg1; \
+ asm volatile( \
+ "syscall" \
+ : "=a" (status) \
+ : "0" (__NR_##name), \
+ "r" (rdi) \
+ : "memory", "rcx", "r11", "cc" \
+ ); \
+ return status; \
+}
+
+#define SYSCALL2(name, type1, type2) \
+int name(type1 arg1, type2 arg2) { \
+ int status; \
+ register type1 rdi asm("rdi"); \
+ register type2 rsi asm("rsi"); \
+ rdi = arg1; \
+ rsi = arg2; \
+ asm volatile( \
+ "syscall" \
+ : "=a" (status) \
+ : "0" (__NR_##name), \
+ "r" (rdi), \
+ "r" (rsi) \
+ : "memory", "rcx", "r11", "cc" \
+ ); \
+ return status; \
+}
+
+#define SYSCALL3(name, type1, type2, type3) \
+int name(type1 arg1, type2 arg2, type3 arg3) { \
+ int status; \
+ register type1 rdi asm("rdi"); \
+ register type2 rsi asm("rsi"); \
+ register type3 rdx asm("rdx"); \
+ rdi = arg1; \
+ rsi = arg2; \
+ rdx = arg3; \
+ asm volatile( \
+ "syscall" \
+ : "=a" (status) \
+ : "0" (__NR_##name), \
+ "r" (rdi), \
+ "r" (rsi), \
+ "r" (rdx) \
+ : "memory", "rcx", "r11", "cc" \
+ ); \
+ return status; \
+}
+
+#define SYSCALL4(name, type1, type2, type3, type4) \
+int name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
+ int status; \
+ register type1 rdi asm("rdi"); \
+ register type2 rsi asm("rsi"); \
+ register type3 rdx asm("rdx"); \
+ register type4 r10 asm("r10"); \
+ rdi = arg1; \
+ rsi = arg2; \
+ rdx = arg3; \
+ r10 = arg4; \
+ asm volatile( \
+ "syscall" \
+ : "=a" (status) \
+ : "0" (__NR_##name), \
+ "r" (rdi), \
+ "r" (rsi), \
+ "r" (rdx), \
+ "r" (r10) \
+ : "memory", "rcx", "r11", "cc" \
+ ); \
+ return status; \
+}
+
+#define SYSCALL5(name, type1, type2, type3, type4, type5) \
+int name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
+ type5 arg5) { \
+ int status; \
+ register type1 rdi asm("rdi"); \
+ register type2 rsi asm("rsi"); \
+ register type3 rdx asm("rdx"); \
+ register type4 r10 asm("r10"); \
+ register type5 r8 asm("r8"); \
+ rdi = arg1; \
+ rsi = arg2; \
+ rdx = arg3; \
+ r10 = arg4; \
+ r8 = arg5; \
+ asm volatile( \
+ "syscall" \
+ : "=a" (status) \
+ : "0" (__NR_##name), \
+ "r" (rdi), \
+ "r" (rsi), \
+ "r" (rdx), \
+ "r" (r10), \
+ "r" (r8) \
+ : "memory", "rcx", "r11", "cc" \
+ ); \
+ return status; \
+}
+
+#define SYSCALL6(name, type1, type2, type3, type4, type5, type6)\
+int name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
+ type5 arg5, type6 arg6) { \
+ int status; \
+ register type1 rdi asm("rdi"); \
+ register type2 rsi asm("rsi"); \
+ register type3 rdx asm("rdx"); \
+ register type4 r10 asm("r10"); \
+ register type5 r8 asm("r8"); \
+ register type6 r9 asm("r9"); \
+ rdi = arg1; \
+ rsi = arg2; \
+ rdx = arg3; \
+ r10 = arg4; \
+ r8 = arg5; \
+ r9 = arg6; \
+ asm volatile( \
+ "syscall" \
+ : "=a" (status) \
+ : "0" (__NR_##name), \
+ "r" (rdi), \
+ "r" (rsi), \
+ "r" (rdx), \
+ "r" (r10), \
+ "r" (r8), \
+ "r" (r9) \
+ : "memory", "rcx", "r11", "cc" \
+ ); \
+ return status; \
+}
+
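+/*
+ * Illustrative note (added for exposition): each SYSCALLn() macro above
+ * expands to a small wrapper function. For example,
+ * SYSCALL1(aspace_destroy, id_t) below expands to approximately:
+ *
+ *   int aspace_destroy(id_t arg1) {
+ *       int status;
+ *       register id_t rdi asm("rdi");
+ *       rdi = arg1;
+ *       asm volatile("syscall"
+ *                    : "=a" (status)
+ *                    : "0" (__NR_aspace_destroy), "r" (rdi)
+ *                    : "memory", "rcx", "r11", "cc");
+ *       return status;
+ *   }
+ */
+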
+/**
+ * Physical memory management.
+ */
+SYSCALL1(pmem_add, const struct pmem_region *);
+SYSCALL1(pmem_update, const struct pmem_region *);
+SYSCALL2(pmem_query, const struct pmem_region *, struct pmem_region *);
+SYSCALL4(pmem_alloc, size_t, size_t,
+ const struct pmem_region *, struct pmem_region *);
+
+/**
+ * Address space management.
+ */
+SYSCALL1(aspace_get_myid, id_t *);
+SYSCALL3(aspace_create, id_t, const char *, id_t *);
+SYSCALL1(aspace_destroy, id_t);
+SYSCALL5(aspace_find_hole, id_t, vaddr_t, size_t, size_t, vaddr_t *);
+SYSCALL6(aspace_add_region,
+ id_t, vaddr_t, size_t, vmflags_t, vmpagesize_t, const char *);
+SYSCALL3(aspace_del_region, id_t, vaddr_t, size_t);
+SYSCALL4(aspace_map_pmem, id_t, paddr_t, vaddr_t, size_t);
+SYSCALL3(aspace_unmap_pmem, id_t, vaddr_t, size_t);
+SYSCALL4(aspace_smartmap, id_t, id_t, vaddr_t, size_t);
+SYSCALL2(aspace_unsmartmap, id_t, id_t);
+SYSCALL1(aspace_dump2console, id_t);
+
+/**
+ * Task management.
+ */
+SYSCALL1(task_get_myid, id_t *);
+SYSCALL4(task_create, id_t, const char *, const start_state_t *, id_t *);
+SYSCALL1(task_exit, int);
+SYSCALL0(task_yield);
+
+/**
+ * ELF related system calls.
+ */
+SYSCALL2(elf_hwcap, id_t, uint32_t *);