#define _hw_enirq(o,v,n,m,f,...)	_hw_write(n,m,1) HW_EOL(__VA_ARGS__)
#define _hwa_enirq(o,v,n,m,f,...)	_hwa_write(n,m,1) HW_EOL(__VA_ARGS__)

#define _hw_dsirq(o,v,n,m,f,...)	_hw_write(n,m,0) HW_EOL(__VA_ARGS__)
#define _hwa_dsirq(o,v,n,m,f,...)	_hwa_write(n,m,0) HW_EOL(__VA_ARGS__)

#define _hw_isenirq(o,v,n,m,f,...)	_hw_read(n,m) HW_EOL(__VA_ARGS__)

#define _hw_rdirq(o,v,n,m,f,...)	_hw_read(n,f) HW_EOL(__VA_ARGS__)

#define _hw_clirq(o,v,n,m,f,...)	_hw_write(n,f,1) HW_EOL(__VA_ARGS__)
#define _hwa_clirq(o,v,n,m,f,...)	_hwa_write(n,f,1) HW_EOL(__VA_ARGS__)

#define _hw_enirqs(o,a,...)		hw_asm("sei") HW_EOL(__VA_ARGS__)
#define _hw_dsirqs(o,a,...)		hw_asm("cli") HW_EOL(__VA_ARGS__)
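
/*  These are the implementations behind HWA's generic instructions. Usage
 *  sketch (the object and IRQ names are illustrative, they depend on the
 *  target device):
 *
 *	hw( enable, (counter0,irq,overflow) );	  //  IRQ mask bit <- 1
 *	hwa( disable, (counter0,irq,overflow) );  //  deferred until commit
 *	hw( clear, (counter0,irq,overflow) );	  //  write 1 to clear the flag
 *	hw( enable, interrupts );		  //  _hw_enirqs() -> "sei"
 */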
#define hw_wait_irq			, _hw_wait_irq
#define _hw_wait_irq(...)		do{ hw_asm("sleep"); }while(0)
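
/*  Note: `sleep` only stops the core if the SE bit (sleep enable) is set,
 *  so the sleep mode must have been configured beforehand. Usage sketch,
 *  assuming the instruction is spelled as its definition suggests:
 *
 *	hw( wait, irq );	//  -> _hw_wait_irq() -> "sleep"
 */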
#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)
#  define HW_ISR_ATTRIBUTES __attribute__((signal, used, externally_visible))
#else /* GCC < 4.1 */
#  define HW_ISR_ATTRIBUTES __attribute__((signal, used))
#endif
#define _hw_israttr_atomic		,
#define _hw_israttr_non_interruptible	,
#define _hw_israttr_interruptible	, __attribute__((interrupt))
#define _hw_israttr_naked		, __attribute__((naked))
#define _HW_ISR_(...)			_HW_ISR__(__VA_ARGS__)
#define _HW_ISR__(v,...)						\
  HW_EXTERN_C void __vector_##v(void) HW_ISR_ATTRIBUTES __VA_ARGS__ ;	\
  void __vector_##v(void)
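
/*  Usage sketch (illustrative vector number): `_HW_ISR_(5)` declares
 *  `__vector_5` with the ISR attributes, then opens its definition:
 *
 *	_HW_ISR_(5)				  //  plain ISR
 *	{ ... }
 *
 *	_HW_ISR_(5, __attribute__((interrupt)))	  //  via _hw_israttr_interruptible
 *	{ ... }
 */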
#define _HW_ISR_ALIAS(v1,v2)		_HW_ISR_ALIAS_(v1,v2)
#define _HW_ISR_ALIAS_(v1,v2)						\
  HW_EXTERN_C void __vector_##v1(void) __attribute__((signal,used,externally_visible)) \
    __attribute__((alias(HW_Q(__vector_##v2)))) ;			\
  void __vector_##v1(void)
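
/*  Usage sketch (illustrative vector numbers): make vector 3 execute the
 *  handler of vector 2:
 *
 *	_HW_ISR_ALIAS(3,2);
 */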
#define _HW_VISR_(v)						\
  HW_EXTERN_C void __vector_##v(void) __attribute__((naked)) ;	\
  void __vector_##v(void) { hw_asm("reti"); }
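
/*  A "void" ISR: `naked` removes the compiler's prologue and epilogue, so
 *  the handler reduces to a single `reti`. This is the cheapest way to
 *  handle an IRQ that has nothing to do, e.g. one used only to wake the
 *  core from sleep.
 */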
#define hw_waste_cycles(n)		__builtin_avr_delay_cycles(n)
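
/*  Usage sketch: the argument must be a compile-time constant. Assuming
 *  HW_SYSHZ holds the system clock frequency (name per HWA's convention),
 *  a ~1 ms busy wait is:
 *
 *	hw_waste_cycles( HW_SYSHZ / 1000 );
 */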
#include "../../hwa/hwa_2.h"
#define hw_power			, _hw_power
#define hwa_power			, _hwa_power

#define _hw_power(c,o,a,v,g,...)	HW_B(_hwx_pwr1_,g)(_hw,o,v,g)
#define _hwa_power(c,o,a,v,g,...)	HW_B(_hwx_pwr1_,g)(_hwa,o,v,g)
#define _hwx_pwr1_0(h,o,v,g)		HW_E(HW_EM_G(g))
#define _hwx_pwr1_1(h,o,v,g)		HW_B(_hwx_pwr2_,_hw_state_##v)(h,o,v)
#define _hwx_pwr2_0(h,o,v)		HW_E(HW_EM_ST(v))
#define _hwx_pwr2_1(h,o,v)		HW_B(_hwx_pwr3_,HW_G2(_hw_isa_reg, hw_##o##_##prr))(h,o,v)
#define _hwx_pwr3_0(h,o,v)		HW_E(HW_EM_FO(h##_power,o))
#define _hwx_pwr3_1(h,o,v)		h##_write(o,prr,HW_A1(_hw_state_##v)==0)
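
/*  Usage sketch (illustrative object name):
 *
 *	hw( power, counter0, off );	//  sets the prr bit
 *	hwa( power, counter0, on );	//  deferred, applied at commit
 *
 *  Note the inversion in _hwx_pwr3_1(): the peripheral's PRR bit is written
 *  1 to power it off, hence `HW_A1(_hw_state_##v)==0`.
 */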
#define HW_ATOMIC(...)				\
  do{						\
    uint8_t s = _hw_read(core0,sreg);		\
    hw( disable, interrupts );			\
    { __VA_ARGS__ }				\
    _hw_write(core0,sreg,s) ;			\
  }while(0)
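
/*  Usage sketch (hedged, `ms_count` is an illustrative name): read a 16-bit
 *  variable shared with an ISR without being interrupted:
 *
 *	extern volatile uint16_t ms_count ;
 *	uint16_t t ;
 *	HW_ATOMIC( t = ms_count ; );
 *
 *  The whole SREG is saved and restored, so interrupts are re-enabled
 *  afterwards only if they were enabled before.
 */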
#define HW_MEM_EEPROM			__attribute__((section(".eeprom")))
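
/*  Usage sketch (illustrative name): place a variable in the .eeprom
 *  section, which the toolchain maps to the on-chip EEPROM image:
 *
 *	static uint8_t ee_config HW_MEM_EEPROM = 0x55 ;
 *
 *  At run-time the EEPROM is not memory-mapped on most AVRs, so such a
 *  variable must still be accessed through the EEPROM registers.
 */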
HW_INLINE
void _hw_write_r8 ( intptr_t ra, uint8_t rwm, uint8_t rfm, uint8_t mask, uint8_t value )
{
#if defined HWA_CHECK_ACCESS
  if ( ra == (intptr_t)~0 )	/* assumed marker for an invalid address */
    HWA_E(HW_EM_X("_hw_write_r8: invalid access"));
#endif
#if !defined HWA_NO_CHECK_USEFUL
  if ( mask == 0 )
    HWA_E(HW_EM_X("_hw_write_r8: no bit to be changed?"));
#endif
#if !defined HWA_NO_CHECK_LIMITS
  if ( value & (~mask) ) {
    HWA_E(HW_EM_X("_hw_write_r8: value overflows mask"));
  }
#endif

  /*  Verify that we do not try to set non-writeable bits */
  if ( (value & mask & rwm) != (value & mask) )
    HWA_E(HW_EM_X("_hw_write_r8: bits not writeable."));

  volatile uint8_t *p = (volatile uint8_t *)ra ;

  if ( ra < 0x40 &&
       (mask==0x01 || mask==0x02 || mask==0x04 || mask==0x08 ||
	mask==0x10 || mask==0x20 || mask==0x40 || mask==0x80) ) {
    /*  One single bit in the I/O space: the compiler can emit sbi/cbi */
    if ( value )
      *p |= mask ;
    else
      *p &= ~mask ;
  }
  else {
    /*  Mask of writeable bits that must keep their value */
    uint8_t rm = rwm & ~mask & ~rfm ;
    if ( rm == 0 )
      /*  No bit to keep: write the value directly */
      *p = value ;
    else {
      /*  Read-modify-write */
      uint8_t sm = mask & rwm & value ;		/* bits to set	 */
      uint8_t cm = mask & rwm & (~value) ;	/* bits to clear */
      *p = (*p & ~cm) | sm ;
    }
  }
}
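
/*  Worked example: rwm=0xFF, rfm=0x00, mask=0x0C, value=0x08, i.e. writing
 *  `10` into bits 3..2 of a fully writeable register. The mask spans two
 *  bits so sbi/cbi cannot be used, and rm=0xF3 is not null:
 *
 *	sm = 0x0C & 0xFF & 0x08  = 0x08		//  bits to set
 *	cm = 0x0C & 0xFF & ~0x08 = 0x04		//  bits to clear
 *	*p = (*p & ~0x04) | 0x08		//  other bits are preserved
 */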
HW_INLINE
void _hw_write_r16 ( intptr_t ra, uint16_t rwm, uint16_t rfm, uint16_t mask, uint16_t value )
{
#if defined HWA_CHECK_ACCESS
  if ( ra == (intptr_t)~0 )	/* assumed marker for an invalid address */
    HWA_E(HW_EM_X("_hw_write_r16: invalid access"));
#endif
#if !defined HWA_NO_CHECK_USEFUL
  if ( mask == 0 )
    HWA_E(HW_EM_X("_hw_write_r16: no bit to be changed?"));
#endif
#if !defined HWA_NO_CHECK_LIMITS
  if ( value & (~mask) ) {
    HWA_E(HW_EM_X("_hw_write_r16: value overflows mask"));
  }
#endif

  /*  Verify that we do not try to set non-writeable bits */
  if ( (value & mask & rwm) != (value & mask) )
    HWA_E(HW_EM_X("_hw_write_r16: bits not writeable."));

  volatile uint16_t *p = (volatile uint16_t *)ra ;

  if ( ra < 0x40 &&
       (mask==0x0001 || mask==0x0002 || mask==0x0004 || mask==0x0008 ||
	mask==0x0010 || mask==0x0020 || mask==0x0040 || mask==0x0080 ||
	mask==0x0100 || mask==0x0200 || mask==0x0400 || mask==0x0800 ||
	mask==0x1000 || mask==0x2000 || mask==0x4000 || mask==0x8000) ) {
    /*  One single bit in the I/O space: the compiler can emit sbi/cbi */
    if ( value )
      *p |= mask ;
    else
      *p &= ~mask ;
  }
  else {
    /*  Mask of writeable bits that must keep their value */
    uint16_t rm = rwm & ~mask & ~rfm ;
    if ( rm == 0 )
      /*  No bit to keep: write the value directly */
      *p = value ;
    else {
      /*  Read-modify-write */
      uint16_t sm = mask & rwm & value ;	/* bits to set	 */
      uint16_t cm = mask & rwm & (~value) ;	/* bits to clear */
      *p = (*p & ~cm) | sm ;
    }
  }
}
HW_INLINE
void _hwa_commit__r8 ( hwa_r8_t *r, uint8_t rwm, uint8_t rfm, _Bool commit )
{
  if ( !commit ) {
    /*  Not committing to hardware: just merge the modifications */
    r->ovalue = (r->ovalue & r->omask & ~r->mmask) | (r->mvalue & r->mmask) ;
    r->omask |= r->mmask ;
    r->mmask = 0 ;
    return ;
  }

  volatile uint8_t *p = (volatile uint8_t *)r->a ;

  /*  Bits to write: writeable, modified, value different or unknown */
  uint8_t wm = rwm & r->mmask & ((r->ovalue ^ r->mvalue) | ~r->omask);

  if ( wm ) {
    if ( (uintptr_t)p < 0x40 &&
	 (wm==0x01 || wm==0x02 || wm==0x04 || wm==0x08 ||
	  wm==0x10 || wm==0x20 || wm==0x40 || wm==0x80 ) ) {
      /*  One single bit in the I/O space: sbi/cbi */
      if ( wm & r->mvalue )
	*p |= wm ;
      else
	*p &= ~wm ;
      r->ovalue = (r->ovalue & ~wm) | (r->mvalue & wm) ;
    }
    else {
      /*  Bits to read first: writeable, unknown, not modified, not flags */
      uint8_t rm = ~r->mmask & ~rfm & ~r->omask & rwm ;
      if ( rm )
	r->ovalue = *p ;
      r->ovalue = ((r->ovalue & ~wm) | (r->mvalue & wm)) & ~rfm ;
      /*  Write back, writing 1 to the flags that must be cleared */
      *p = r->ovalue | (rfm & r->mmask & r->mvalue) ;
    }
  }

  r->omask |= r->mmask ;
  r->mmask = 0 ;
}
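
/*  Worked example: committing mmask=0x03, mvalue=0x01 (rwm=0xFF, rfm=0x00)
 *  to a register whose content is unknown (omask=0x00):
 *
 *	wm = 0xFF & 0x03 & ((ovalue^0x01) | 0xFF) = 0x03  //  2 bits: no sbi/cbi
 *	rm = ~0x03 & 0xFF & 0xFF & 0xFF           = 0xFC  //  read needed
 *	ovalue = (*p & ~0x03) | 0x01 ;
 *	*p = ovalue ;
 *
 *  Afterwards, omask is extended with the committed bits.
 */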
HW_INLINE
void _hwa_commit__r16 ( hwa_r16_t *r, uint16_t rwm, uint16_t rfm, _Bool commit )
{
  /*  Same logic as _hwa_commit__r8, without the sbi/cbi special case */
  if ( !commit ) {
    r->ovalue = (r->ovalue & r->omask & ~r->mmask) | (r->mvalue & r->mmask) ;
    r->omask |= r->mmask ;
    r->mmask = 0 ;
    return ;
  }

  volatile uint16_t *p = (volatile uint16_t *)r->a ;
  uint16_t wm = rwm & r->mmask & ((r->ovalue ^ r->mvalue) | ~r->omask);

  if ( wm ) {
    uint16_t rm = ~r->mmask & ~rfm & ~r->omask & rwm ;
    if ( rm )
      r->ovalue = *p ;
    r->ovalue = ((r->ovalue & ~wm) | (r->mvalue & wm)) & ~rfm ;
    *p = r->ovalue | (rfm & r->mmask & r->mvalue) ;
  }

  r->omask |= r->mmask ;
  r->mmask = 0 ;
}
#define _hw_read__r8(ra,rbn,rbp)	(((*(volatile uint8_t*)(ra))>>(rbp))&((1U<<(rbn))-1))

#define hw_read__r16			, _hw_read_r16
#define _hw_read_r16(o,a,wm,fm,...)	(*(volatile uint16_t*)(a))
HW_INLINE uint16_t _hw_read__r16 ( intptr_t ra, uint8_t rbn, uint8_t rbp )
{
  uint16_t m = (1UL<<rbn)-1 ;
  volatile uint16_t *p = (volatile uint16_t *)ra ;
  return ((*p)>>rbp) & m ;
}
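
/*  Worked example: _hw_read__r8(ra,2,6) extracts bits 7..6 of the register
 *  at address ra: (*(volatile uint8_t*)ra >> 6) & 0x03.
 */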
#define _hw_atomic_read__r8		_hw_read__r8	/* one 8-bit read is atomic */
/*  Read one 16-bit register with interrupts disabled around the low-byte
 *  access. Reconstructed detail: on AVR the instruction that follows `sei`
 *  executes before any pending IRQ is serviced, so the second `lds` is
 *  still protected. Note that interrupts are re-enabled unconditionally.
 */
HW_INLINE uint16_t _hw___atomic_read__r16 ( intptr_t ra )
{
  uint16_t r ;
  __asm__ __volatile__ (
			"cli"			"\n\t"
			"lds %A[r], %[a]"	"\n\t"
			"sei"			"\n\t"
			"lds %B[r], %[a]+1"	"\n\t"
			: [r] "=&r" (r)
			: [a] "p" (ra)
			) ;
  return r ;
}
#define _hw_atomic_read__m11(oo,o,r,rc,ra,rwm,rfm,bn,bp,...)	_hw_atomic_read_##rc(ra,bn,bp)
HW_INLINE uint16_t _hw_atomic_read__r16 ( intptr_t ra, uint8_t rbn, uint8_t rbp )
{
  uint16_t v ;
  uint16_t m = ((1UL<<rbn)-1)<<rbp ;

  volatile uint8_t *pl = (volatile uint8_t *)ra+0 ;	/* low byte  */
  volatile uint8_t *ph = (volatile uint8_t *)ra+1 ;	/* high byte */

  if ( (m & 0xFF) && (m >> 8) )
    /*  Both bytes are concerned: use the interrupt-safe two-byte read */
    v = _hw___atomic_read__r16( ra );
  else if ( (m & 0xFF) == 0 )
    /*  Only the high byte is concerned: a single 8-bit read is atomic */
    v = (*ph)<<8 ;
  else /* (m>>8) == 0 */
    /*  Only the low byte is concerned */
    v = *pl ;

  return (v & m)>>rbp ;
}
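
/*  Usage sketch (hedged: 0x84 is the address of TCNT1 on some devices):
 *
 *	uint16_t t = _hw_atomic_read__r16( 0x84, 16, 0 );  //  2 bytes: IRQ-safe path
 *	uint16_t l = _hw_atomic_read__r16( 0x84,  8, 0 );  //  low byte: plain read
 */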
#define atomic_read_word(__addr)		\
  (__extension__({				\
    uint16_t __result ;				\
    __asm__ __volatile__ (			\
      "cli                       \n\t"		\
      "lds %A[res], %[addr]      \n\t"		\
      "sei                       \n\t"		\
      "lds %B[res], %[addr] + 1  \n\t"		\
      : [res] "=&r" (__result)			\
      : [addr] "p" (&(__addr))			\
    ) ;						\
    __result ;					\
  }))
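
/*  Usage sketch (illustrative variable): read a word updated by an ISR:
 *
 *	volatile uint16_t count ;
 *	uint16_t v = atomic_read_word(count);
 *
 *  On AVR, the instruction that follows `sei` executes before any pending
 *  IRQ is serviced, so the high byte is read before an ISR can run.
 */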
#define atomic_write_word_restore(__addr, __data)	\
  do {							\
    uint8_t __tmp ;					\
    __asm__ __volatile__ (				\
      "in %[tmp], __SREG__       \n\t"			\
      "cli                       \n\t"			\
      "sts %[addr], %A[data]     \n\t"			\
      "out __SREG__, %[tmp]      \n\t"			\
      "sts %[addr] + 1, %B[data] \n\t"			\
      : [tmp] "=&r" (__tmp)				\
      : [data] "r" (__data)				\
      , [addr] "p" (&(__addr))				\
    ) ;							\
  } while(0)
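
/*  Design note: restoring SREG between the two `sts` relies on the same
 *  one-instruction latency: the second `sts` executes before any pending
 *  IRQ, while interrupts stay disabled for only two instructions if they
 *  were enabled on entry. Usage sketch (illustrative variable):
 *
 *	volatile uint16_t count ;
 *	atomic_write_word_restore(count, 1000);
 */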