[PATCH] Removes x86 warning messages

Brad Proctor (fredlie@sprynet.com)
Wed, 27 Oct 1999 22:56:23 -0400


Hi,

Here is my first patch. It's nothing special, but I have to start
somewhere. This patch simply removes warning messages like the
following:
"warning: using `%eax' instead of `%ax' due to `l' suffix."
by changing the `l' suffixes to `w' on word-sized operations and adding
an explicit `w' or `l' suffix where one is missing.
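
For anyone who wants to reproduce the warning outside the kernel tree,
here is a minimal stand-alone sketch (not part of the patch; the file
name demo.c and the use of %cs are just for illustration). Reading a
segment register into a 16-bit variable with an `l' suffix draws the
warning from gas; spelling the move `movw' keeps it quiet:

    /* demo.c -- illustrative only, not part of the patch */
    #include <stdio.h>

    int main(void)
    {
            unsigned short cs_sel;

            /*
             * Written as "movl %%cs,%0", gas warns:
             *   using `%eax' instead of `%ax' due to `l' suffix
             * because cs_sel is 16 bits wide, so the move is really
             * a word operation; "movw" says so explicitly.
             */
            __asm__("movw %%cs,%0" : "=r" (cs_sel));

            printf("cs = %#x\n", (unsigned) cs_sel);
            return 0;
    }

Build it with "gcc -O2 demo.c"; change movw back to movl and the
warning comes back.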

This patch applies to 2.3.24.

Brad Proctor

diff -urN linux-2.3.24/arch/i386/boot/compressed/head.S linux/arch/i386/boot/compressed/head.S
--- linux-2.3.24/arch/i386/boot/compressed/head.S Wed Oct 27 20:36:21 1999
+++ linux/arch/i386/boot/compressed/head.S Wed Oct 27 21:07:43 1999
@@ -37,10 +37,10 @@
cld
cli
movl $(__KERNEL_DS),%eax
- movl %ax,%ds
- movl %ax,%es
- movl %ax,%fs
- movl %ax,%gs
+ movw %ax,%ds
+ movw %ax,%es
+ movw %ax,%fs
+ movw %ax,%gs
#ifdef __SMP__
orw %bx,%bx # What state are we in BX=1 for SMP
# 0 for boot
@@ -49,9 +49,9 @@
/*
* We are trampolining an SMP processor
*/
- mov %ax,%ss
+ movw %ax,%ss
xorl %eax,%eax # Back to 0
- mov %cx,%ax # SP low 16 bits
+ movw %cx,%ax # SP low 16 bits
movl %eax,%esp
pushl 0 # Clear NT
popfl
diff -urN linux-2.3.24/arch/i386/boot/setup.S linux/arch/i386/boot/setup.S
--- linux-2.3.24/arch/i386/boot/setup.S Wed Oct 27 20:36:21 1999
+++ linux/arch/i386/boot/setup.S Wed Oct 27 21:17:17 1999
@@ -228,7 +228,7 @@
jmp good_sig

no_sig:
- lea no_sig_mess, %si
+ leaw no_sig_mess, %si
call prtstr

no_sig_loop:
@@ -248,7 +248,7 @@

pushw %cs # No, we have an old loader,
popw %ds # die.
- lea loader_panic_mess, %si
+ leaw loader_panic_mess, %si
call prtstr

jmp no_sig_loop
diff -urN linux-2.3.24/arch/i386/boot/video.S linux/arch/i386/boot/video.S
--- linux-2.3.24/arch/i386/boot/video.S Wed Oct 27 20:36:22 1999
+++ linux/arch/i386/boot/video.S Wed Oct 27 21:11:58 1999
@@ -234,7 +234,7 @@
xorw %bx, %bx
xorw %di, %di
int $0x10
- cmp $0x004f, %ax
+ cmpw $0x004f, %ax
jnz no_pm

movw %es, %fs:(PARAM_VESAPM_SEG)
Binary files linux-2.3.24/arch/i386/kernel/a.out and linux/arch/i386/kernel/a.out differ
diff -urN linux-2.3.24/arch/i386/kernel/apm.c linux/arch/i386/kernel/apm.c
--- linux-2.3.24/arch/i386/kernel/apm.c Wed Oct 27 20:36:24 1999
+++ linux/arch/i386/kernel/apm.c Wed Oct 27 21:15:58 1999
@@ -419,10 +419,10 @@
"pushl %%ds\n\t" \
"pushl %%es\n\t" \
"xorl %%edx, %%edx\n\t" \
- "mov %%dx, %%ds\n\t" \
- "mov %%dx, %%es\n\t" \
- "mov %%dx, %%fs\n\t" \
- "mov %%dx, %%gs\n\t"
+ "movw %%dx, %%ds\n\t" \
+ "movw %%dx, %%es\n\t" \
+ "movw %%dx, %%fs\n\t" \
+ "movw %%dx, %%gs\n\t"
# define APM_DO_POP_SEGS \
"popl %%es\n\t" \
"popl %%ds\n\t"
diff -urN linux-2.3.24/arch/i386/kernel/entry.S linux/arch/i386/kernel/entry.S
--- linux-2.3.24/arch/i386/kernel/entry.S Wed Oct 27 20:36:23 1999
+++ linux/arch/i386/kernel/entry.S Wed Oct 27 21:02:11 1999
@@ -92,8 +92,8 @@
pushl %ecx; \
pushl %ebx; \
movl $(__KERNEL_DS),%edx; \
- movl %dx,%ds; \
- movl %dx,%es;
+ movw %dx,%ds; \
+ movw %dx,%es;

#define RESTORE_ALL \
popl %ebx; \
@@ -288,15 +288,15 @@
pushl %ecx
pushl %ebx
cld
- movl %es,%cx
+ movw %es,%cx
xchgl %eax, ORIG_EAX(%esp) # orig_eax (get the error code. )
movl %esp,%edx
xchgl %ecx, ES(%esp) # get the address and save es.
pushl %eax # push the error code
pushl %edx
movl $(__KERNEL_DS),%edx
- movl %dx,%ds
- movl %dx,%es
+ movw %dx,%ds
+ movw %dx,%es
GET_CURRENT(%ebx)
call *%ecx
addl $8,%esp
diff -urN linux-2.3.24/arch/i386/kernel/head.S linux/arch/i386/kernel/head.S
--- linux-2.3.24/arch/i386/kernel/head.S Wed Oct 27 21:05:09 1999
+++ linux/arch/i386/kernel/head.S Wed Oct 27 21:33:52 1999
@@ -46,10 +46,10 @@
*/
cld
movl $(__KERNEL_DS),%eax
- movl %ax,%ds
- movl %ax,%es
- movl %ax,%fs
- movl %ax,%gs
+ movw %ax,%ds
+ movw %ax,%es
+ movw %ax,%fs
+ movw %ax,%gs
#ifdef __SMP__
orw %bx,%bx
jz 1f
@@ -230,13 +230,13 @@
lidt idt_descr
ljmp $(__KERNEL_CS),$1f
1: movl $(__KERNEL_DS),%eax # reload all the segment registers
- movl %ax,%ds # after changing gdt.
- movl %ax,%es
- movl %ax,%fs
- movl %ax,%gs
+ movw %ax,%ds # after changing gdt.
+ movw %ax,%es
+ movw %ax,%fs
+ movw %ax,%gs
#ifdef __SMP__
movl $(__KERNEL_DS), %eax
- mov %ax,%ss # Reload the stack pointer (segment only)
+ movw %ax,%ss # Reload the stack pointer (segment only)
#else
lss stack_start,%esp # Load processor stack
#endif
@@ -291,13 +291,13 @@
* sure everything is ok.
*/
setup_idt:
- lea ignore_int,%edx
+ leal ignore_int,%edx
movl $(__KERNEL_CS << 16),%eax
movw %dx,%ax /* selector = 0x0010 = cs */
movw $0x8E00,%dx /* interrupt gate - dpl=0, present */

- lea SYMBOL_NAME(idt_table),%edi
- mov $256,%ecx
+ leal SYMBOL_NAME(idt_table),%edi
+ movl $256,%ecx
rp_sidt:
movl %eax,(%edi)
movl %edx,4(%edi)
@@ -322,8 +322,8 @@
pushl %es
pushl %ds
movl $(__KERNEL_DS),%eax
- movl %ax,%ds
- movl %ax,%es
+ movw %ax,%ds
+ movw %ax,%es
pushl $int_msg
call SYMBOL_NAME(printk)
popl %eax
diff -urN linux-2.3.24/arch/i386/kernel/process.c linux/arch/i386/kernel/process.c
--- linux-2.3.24/arch/i386/kernel/process.c Wed Oct 27 20:36:23 1999
+++ linux/arch/i386/kernel/process.c Wed Oct 27 21:04:09 1999
@@ -289,11 +289,11 @@
the values are consistent for real mode operation already. */

__asm__ __volatile__ ("movl $0x0010,%%eax\n"
- "\tmovl %%ax,%%ds\n"
- "\tmovl %%ax,%%es\n"
- "\tmovl %%ax,%%fs\n"
- "\tmovl %%ax,%%gs\n"
- "\tmovl %%ax,%%ss" : : : "eax");
+ "\tmovw %%ax,%%ds\n"
+ "\tmovw %%ax,%%es\n"
+ "\tmovw %%ax,%%fs\n"
+ "\tmovw %%ax,%%gs\n"
+ "\tmovw %%ax,%%ss" : : : "eax");

/* Jump to the 16-bit code that we copied earlier. It disables paging
and the cache, switches to real mode, and jumps to the BIOS reset
@@ -585,8 +585,8 @@
* Save away %fs and %gs. No need to save %es and %ds, as
* those are always kernel segments while inside the kernel.
*/
- asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
- asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+ __asm__ __volatile__("movw %%fs,%0":"=m" (*(int *)&prev->fs));
+ __asm__ __volatile__("movw %%gs,%0":"=m" (*(int *)&prev->gs));

/*
* Restore %fs and %gs.
diff -urN linux-2.3.24/arch/i386/kernel/signal.c linux/arch/i386/kernel/signal.c
--- linux-2.3.24/arch/i386/kernel/signal.c Wed Oct 27 20:36:24 1999
+++ linux/arch/i386/kernel/signal.c Wed Oct 27 21:44:06 1999
@@ -339,9 +339,9 @@
int tmp, err = 0;

tmp = 0;
- __asm__("movl %%gs,%w0" : "=r"(tmp): "0"(tmp));
+ __asm__("movw %%gs,%w0" : "=r"(tmp): "0"(tmp));
err |= __put_user(tmp, (unsigned int *)&sc->gs);
- __asm__("movl %%fs,%w0" : "=r"(tmp): "0"(tmp));
+ __asm__("movw %%fs,%w0" : "=r"(tmp): "0"(tmp));
err |= __put_user(tmp, (unsigned int *)&sc->fs);

err |= __put_user(regs->xes, (unsigned int *)&sc->es);
diff -urN linux-2.3.24/arch/i386/kernel/trampoline.S linux/arch/i386/kernel/trampoline.S
--- linux-2.3.24/arch/i386/kernel/trampoline.S Wed Oct 27 20:36:24 1999
+++ linux/arch/i386/kernel/trampoline.S Wed Oct 27 21:12:26 1999
@@ -37,10 +37,10 @@
ENTRY(trampoline_data)
r_base = .

- mov %cs, %ax # Code and data in the same place
- mov %ax, %ds
+ movw %cs, %ax # Code and data in the same place
+ movw %ax, %ds

- mov $1, %bx # Flag an SMP trampoline
+ movw $1, %bx # Flag an SMP trampoline
cli # We should be safe anyway

movl $0xA5A5A5A5, trampoline_data - r_base
@@ -49,8 +49,8 @@
lidt idt_48 - r_base # load idt with 0, 0
lgdt gdt_48 - r_base # load gdt with whatever is appropriate

- xor %ax, %ax
- inc %ax # protected mode (PE) bit
+ xorw %ax, %ax
+ incw %ax # protected mode (PE) bit
lmsw %ax # into protected mode
jmp flush_instr
flush_instr:
diff -urN linux-2.3.24/arch/i386/kernel/traps.c linux/arch/i386/kernel/traps.c
--- linux-2.3.24/arch/i386/kernel/traps.c Wed Oct 27 20:36:23 1999
+++ linux/arch/i386/kernel/traps.c Wed Oct 27 21:57:51 1999
@@ -670,7 +670,7 @@
"movb $0,6(%2)\n\t" \
"movb %%ah,7(%2)\n\t" \
"rorl $16,%%eax" \
- : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
+ : "=m"(*(n)) : "a" (addr), "r"(n), "ir"((short)limit), "i"(type))

void set_tss_desc(unsigned int n, void *addr)
{
diff -urN linux-2.3.24/arch/i386/kernel/vm86.c linux/arch/i386/kernel/vm86.c
--- linux-2.3.24/arch/i386/kernel/vm86.c Wed Oct 27 20:36:23 1999
+++ linux/arch/i386/kernel/vm86.c Wed Oct 27 21:11:36 1999
@@ -260,7 +260,7 @@
mark_screen_rdonly(tsk);
unlock_kernel();
__asm__ __volatile__(
- "xorl %%eax,%%eax; movl %%ax,%%fs; movl %%ax,%%gs\n\t"
+ "xorl %%eax,%%eax; movw %%ax,%%fs; movw %%ax,%%gs\n\t"
"movl %0,%%esp\n\t"
"jmp ret_from_sys_call"
: /* no outputs */
diff -urN linux-2.3.24/arch/i386/lib/checksum.S linux/arch/i386/lib/checksum.S
--- linux-2.3.24/arch/i386/lib/checksum.S Wed Oct 27 20:36:23 1999
+++ linux/arch/i386/lib/checksum.S Wed Oct 27 21:31:44 1999
@@ -85,8 +85,8 @@
adcl %ebx, %eax
movl 28(%esi), %ebx
adcl %ebx, %eax
- lea 32(%esi), %esi
- dec %ecx
+ leal 32(%esi), %esi
+ decl %ecx
jne 1b
adcl $0, %eax
2: movl %edx, %ecx
@@ -94,8 +94,8 @@
je 4f
shrl $2, %edx # This clears CF
3: adcl (%esi), %eax
- lea 4(%esi), %esi
- dec %edx
+ leal 4(%esi), %esi
+ decl %edx
jne 3b
adcl $0, %eax
4: andl $3, %ecx
@@ -133,13 +133,13 @@
addl %ebx,%esi
shrl $2, %ebx
negl %ebx
- lea 45f(%ebx,%ebx,2), %ebx
+ leal 45f(%ebx,%ebx,2), %ebx
testl %esi, %esi
jmp *%ebx

# Handle 2-byte-aligned regions
20: addw (%esi), %ax
- lea 2(%esi), %esi
+ leal 2(%esi), %esi
adcl $0, %eax
jmp 10b

@@ -189,9 +189,9 @@
adcl -8(%esi), %eax
adcl -4(%esi), %eax
45:
- lea 128(%esi), %esi
+ leal 128(%esi), %esi
adcl $0, %eax
- dec %ecx
+ decl %ecx
jge 40b
movl %edx, %ecx
50: andl $3, %ecx
@@ -303,9 +303,9 @@
adcl %edx, %eax
DST( movl %edx, 28(%edi) )

- lea 32(%esi), %esi
- lea 32(%edi), %edi
- dec %ecx
+ leal 32(%esi), %esi
+ leal 32(%edi), %edi
+ decl %ecx
jne 1b
adcl $0, %eax
2: movl FP(%esp), %edx
@@ -316,9 +316,9 @@
SRC(3: movl (%esi), %ebx )
adcl %ebx, %eax
DST( movl %ebx, (%edi) )
- lea 4(%esi), %esi
- lea 4(%edi), %edi
- dec %edx
+ leal 4(%esi), %esi
+ leal 4(%edi), %edi
+ decl %edx
jne 3b
adcl $0, %eax
4: andl $3, %ecx
@@ -398,7 +398,7 @@
negl %ebx
subl %ebx, %esi
subl %ebx, %edi
- lea 3f(%ebx,%ebx), %ebx
+ leal 3f(%ebx,%ebx), %ebx
testl %esi, %esi
jmp *%ebx
1: addl $64,%esi
@@ -408,7 +408,7 @@
ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20)
ROUND (-16) ROUND(-12) ROUND(-8) ROUND(-4)
3: adcl $0,%eax
- dec %ecx
+ decl %ecx
jge 1b
4: andl $3, %edx
jz 7f
diff -urN linux-2.3.24/include/asm-i386/hw_irq.h linux/include/asm-i386/hw_irq.h
--- linux-2.3.24/include/asm-i386/hw_irq.h Wed Oct 27 20:34:41 1999
+++ linux/include/asm-i386/hw_irq.h Wed Oct 27 21:46:09 1999
@@ -124,8 +124,8 @@
"pushl %ecx\n\t" \
"pushl %ebx\n\t" \
"movl $" STR(__KERNEL_DS) ",%edx\n\t" \
- "movl %dx,%ds\n\t" \
- "movl %dx,%es\n\t"
+ "movw %dx,%ds\n\t" \
+ "movw %dx,%es\n\t"

#define IRQ_NAME2(nr) nr##_interrupt(void)
#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
