/* Intrinsics and CNE helpers assumed by this header */
#include <stdint.h>
#include <stddef.h>
#include <immintrin.h>

#include <cne_common.h> /* __cne_always_inline */

#ifdef CNE_MACHINE_CPUFLAG_AVX512F

#define ALIGNMENT_MASK 0x3F
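
/*
 * AVX512 implementation below. ALIGNMENT_MASK selects the 64-byte boundary
 * that cne_pktcpy() tests (on both src and dst) to choose between the
 * aligned fast path and the generic path.
 */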
/* Copy 16 bytes from one location to another; locations must not overlap. */
static __cne_always_inline void
cne_mov16(uint8_t *dst, const uint8_t *src)
{
    __m128i xmm0;

    xmm0 = _mm_loadu_si128((const __m128i *)src);
    _mm_storeu_si128((__m128i *)dst, xmm0);
}
/* Copy 32 bytes from one location to another; locations must not overlap. */
static __cne_always_inline void
cne_mov32(uint8_t *dst, const uint8_t *src)
{
    __m256i ymm0;

    ymm0 = _mm256_loadu_si256((const __m256i *)src);
    _mm256_storeu_si256((__m256i *)dst, ymm0);
}
/* Copy 64 bytes from one location to another; locations must not overlap. */
static __cne_always_inline void
cne_mov64(uint8_t *dst, const uint8_t *src)
{
    __m512i zmm0;

    zmm0 = _mm512_loadu_si512((const void *)src);
    _mm512_storeu_si512((void *)dst, zmm0);
}
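
/*
 * Copy 128/256 bytes between non-overlapping locations. These helpers are
 * required by cne_pktcpy_generic() below; their bodies are assumed to follow
 * the file's doubling pattern over cne_mov64().
 */
static __cne_always_inline void
cne_mov128(uint8_t *dst, const uint8_t *src)
{
    cne_mov64(dst + 0 * 64, src + 0 * 64);
    cne_mov64(dst + 1 * 64, src + 1 * 64);
}

static __cne_always_inline void
cne_mov256(uint8_t *dst, const uint8_t *src)
{
    cne_mov64(dst + 0 * 64, src + 0 * 64);
    cne_mov64(dst + 1 * 64, src + 1 * 64);
    cne_mov64(dst + 2 * 64, src + 2 * 64);
    cne_mov64(dst + 3 * 64, src + 3 * 64);
}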
/* Copy 128-byte blocks from one location to another; locations must not overlap. */
static __cne_always_inline void
cne_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
    __m512i zmm0, zmm1;

    while (n >= 128) {
        zmm0 = _mm512_loadu_si512((const void *)(src + 0 * 64));
        n -= 128;
        zmm1 = _mm512_loadu_si512((const void *)(src + 1 * 64));
        src  = src + 128;
        _mm512_storeu_si512((void *)(dst + 0 * 64), zmm0);
        _mm512_storeu_si512((void *)(dst + 1 * 64), zmm1);
        dst = dst + 128;
    }
}
/* Copy 512-byte blocks from one location to another; locations must not overlap. */
static __cne_always_inline void
cne_mov512blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
    __m512i zmm0, zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, zmm7;

    while (n >= 512) {
        zmm0 = _mm512_loadu_si512((const void *)(src + 0 * 64));
        n -= 512;
        zmm1 = _mm512_loadu_si512((const void *)(src + 1 * 64));
        zmm2 = _mm512_loadu_si512((const void *)(src + 2 * 64));
        zmm3 = _mm512_loadu_si512((const void *)(src + 3 * 64));
        zmm4 = _mm512_loadu_si512((const void *)(src + 4 * 64));
        zmm5 = _mm512_loadu_si512((const void *)(src + 5 * 64));
        zmm6 = _mm512_loadu_si512((const void *)(src + 6 * 64));
        zmm7 = _mm512_loadu_si512((const void *)(src + 7 * 64));
        src  = src + 512;
        _mm512_storeu_si512((void *)(dst + 0 * 64), zmm0);
        _mm512_storeu_si512((void *)(dst + 1 * 64), zmm1);
        _mm512_storeu_si512((void *)(dst + 2 * 64), zmm2);
        _mm512_storeu_si512((void *)(dst + 3 * 64), zmm3);
        _mm512_storeu_si512((void *)(dst + 4 * 64), zmm4);
        _mm512_storeu_si512((void *)(dst + 5 * 64), zmm5);
        _mm512_storeu_si512((void *)(dst + 6 * 64), zmm6);
        _mm512_storeu_si512((void *)(dst + 7 * 64), zmm7);
        dst = dst + 512;
    }
}
static __cne_always_inline void *
cne_pktcpy_generic(void *dst, const void *src, size_t n)
{
    uintptr_t dstu = (uintptr_t)dst;
    uintptr_t srcu = (uintptr_t)src;
    void *ret      = dst;
    size_t dstofss;
    size_t bits;

    /* Copy less than 16 bytes: one store per set bit of n */
    if (n < 16) {
        if (n & 0x01) {
            *(uint8_t *)dstu = *(const uint8_t *)srcu;
            srcu             = (uintptr_t)((const uint8_t *)srcu + 1);
            dstu             = (uintptr_t)((uint8_t *)dstu + 1);
        }
        if (n & 0x02) {
            *(uint16_t *)dstu = *(const uint16_t *)srcu;
            srcu              = (uintptr_t)((const uint16_t *)srcu + 1);
            dstu              = (uintptr_t)((uint16_t *)dstu + 1);
        }
        if (n & 0x04) {
            *(uint32_t *)dstu = *(const uint32_t *)srcu;
            srcu              = (uintptr_t)((const uint32_t *)srcu + 1);
            dstu              = (uintptr_t)((uint32_t *)dstu + 1);
        }
        if (n & 0x08)
            *(uint64_t *)dstu = *(const uint64_t *)srcu;
        return ret;
    }

    /* Fast path when the copy size does not exceed 512 bytes */
    if (n <= 32) {
        cne_mov16((uint8_t *)dst, (const uint8_t *)src);
        cne_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
        return ret;
    }
    if (n <= 64) {
        cne_mov32((uint8_t *)dst, (const uint8_t *)src);
        cne_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
        return ret;
    }
    if (n <= 512) {
        if (n >= 256) {
            n -= 256;
            cne_mov256((uint8_t *)dst, (const uint8_t *)src);
            src = (const uint8_t *)src + 256;
            dst = (uint8_t *)dst + 256;
        }
        if (n >= 128) {
            n -= 128;
            cne_mov128((uint8_t *)dst, (const uint8_t *)src);
            src = (const uint8_t *)src + 128;
            dst = (uint8_t *)dst + 128;
        }
COPY_BLOCK_128_BACK63:
        if (n > 64) {
            cne_mov64((uint8_t *)dst, (const uint8_t *)src);
            cne_mov64((uint8_t *)dst - 64 + n, (const uint8_t *)src - 64 + n);
            return ret;
        }
        if (n > 0)
            cne_mov64((uint8_t *)dst - 64 + n, (const uint8_t *)src - 64 + n);
        return ret;
    }

    /* Make the stores aligned when the copy size exceeds 512 bytes */
    dstofss = ((uintptr_t)dst & 0x3F);
    if (dstofss > 0) {
        dstofss = 64 - dstofss;
        n -= dstofss;
        cne_mov64((uint8_t *)dst, (const uint8_t *)src);
        src = (const uint8_t *)src + dstofss;
        dst = (uint8_t *)dst + dstofss;
    }

    /* Copy 512-byte blocks; the block helper gives better instruction
     * ordering when the loads are unaligned. */
    cne_mov512blocks((uint8_t *)dst, (const uint8_t *)src, n);
    bits = n;
    n    = n & 511;
    bits -= n;
    src = (const uint8_t *)src + bits;
    dst = (uint8_t *)dst + bits;

    /* Copy 128-byte blocks */
    if (n >= 128) {
        cne_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
        bits = n;
        n    = n & 127;
        bits -= n;
        src = (const uint8_t *)src + bits;
        dst = (uint8_t *)dst + bits;
    }

    /* Copy whatever is left */
    goto COPY_BLOCK_128_BACK63;
}

#elif defined CNE_MACHINE_CPUFLAG_AVX2

#define ALIGNMENT_MASK 0x1F
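
/*
 * AVX2 implementation below; the aligned path is taken when both pointers
 * are 32-byte aligned.
 */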
/* Copy 16 bytes from one location to another; locations must not overlap. */
static __cne_always_inline void
cne_mov16(uint8_t *dst, const uint8_t *src)
{
    __m128i xmm0;

    xmm0 = _mm_loadu_si128((const __m128i *)src);
    _mm_storeu_si128((__m128i *)dst, xmm0);
}
/* Copy 32 bytes from one location to another; locations must not overlap. */
static __cne_always_inline void
cne_mov32(uint8_t *dst, const uint8_t *src)
{
    __m256i ymm0;

    ymm0 = _mm256_loadu_si256((const __m256i *)src);
    _mm256_storeu_si256((__m256i *)dst, ymm0);
}
/* Copy 64 bytes as two 32-byte moves; locations must not overlap. */
static __cne_always_inline void
cne_mov64(uint8_t *dst, const uint8_t *src)
{
    cne_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
    cne_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
}
/* Copy 128 bytes as four 32-byte moves; locations must not overlap. */
static __cne_always_inline void
cne_mov128(uint8_t *dst, const uint8_t *src)
{
    cne_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
    cne_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
    cne_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
    cne_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
}
/* Copy 128-byte blocks from one location to another; locations must not overlap. */
static __cne_always_inline void
cne_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
    __m256i ymm0, ymm1, ymm2, ymm3;

    while (n >= 128) {
        ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
        n -= 128;
        ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
        ymm2 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 2 * 32));
        ymm3 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 3 * 32));
        src  = (const uint8_t *)src + 128;
        _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
        _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
        _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 2 * 32), ymm2);
        _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 3 * 32), ymm3);
        dst = (uint8_t *)dst + 128;
    }
}
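
/*
 * cne_pktcpy_generic() for AVX2: same size-tiered scheme as the AVX512
 * version, with 256 bytes as the cutover from overlapping moves to the
 * align-then-stream loop.
 */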
static __cne_always_inline void *
cne_pktcpy_generic(void *dst, const void *src, size_t n)
{
    uintptr_t dstu = (uintptr_t)dst;
    uintptr_t srcu = (uintptr_t)src;
    void *ret      = dst;
    size_t dstofss;
    size_t bits;

    /* Copy less than 16 bytes: one store per set bit of n */
    if (n < 16) {
        if (n & 0x01) {
            *(uint8_t *)dstu = *(const uint8_t *)srcu;
            srcu             = (uintptr_t)((const uint8_t *)srcu + 1);
            dstu             = (uintptr_t)((uint8_t *)dstu + 1);
        }
        if (n & 0x02) {
            *(uint16_t *)dstu = *(const uint16_t *)srcu;
            srcu              = (uintptr_t)((const uint16_t *)srcu + 1);
            dstu              = (uintptr_t)((uint16_t *)dstu + 1);
        }
        if (n & 0x04) {
            *(uint32_t *)dstu = *(const uint32_t *)srcu;
            srcu              = (uintptr_t)((const uint32_t *)srcu + 1);
            dstu              = (uintptr_t)((uint32_t *)dstu + 1);
        }
        if (n & 0x08)
            *(uint64_t *)dstu = *(const uint64_t *)srcu;
        return ret;
    }

    /* Fast path when the copy size does not exceed 256 bytes */
    if (n <= 32) {
        cne_mov16((uint8_t *)dst, (const uint8_t *)src);
        cne_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
        return ret;
    }
    if (n <= 48) {
        cne_mov16((uint8_t *)dst, (const uint8_t *)src);
        cne_mov16((uint8_t *)dst + 16, (const uint8_t *)src + 16);
        cne_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
        return ret;
    }
    if (n <= 64) {
        cne_mov32((uint8_t *)dst, (const uint8_t *)src);
        cne_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
        return ret;
    }
    if (n <= 256) {
        if (n >= 128) {
            n -= 128;
            cne_mov128((uint8_t *)dst, (const uint8_t *)src);
            src = (const uint8_t *)src + 128;
            dst = (uint8_t *)dst + 128;
        }
COPY_BLOCK_128_BACK31:
        if (n >= 64) {
            n -= 64;
            cne_mov64((uint8_t *)dst, (const uint8_t *)src);
            src = (const uint8_t *)src + 64;
            dst = (uint8_t *)dst + 64;
        }
        if (n > 32) {
            cne_mov32((uint8_t *)dst, (const uint8_t *)src);
            cne_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
            return ret;
        }
        if (n > 0)
            cne_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
        return ret;
    }

    /* Make the stores aligned when the copy size exceeds 256 bytes */
    dstofss = (uintptr_t)dst & 0x1F;
    if (dstofss > 0) {
        dstofss = 32 - dstofss;
        n -= dstofss;
        cne_mov32((uint8_t *)dst, (const uint8_t *)src);
        src = (const uint8_t *)src + dstofss;
        dst = (uint8_t *)dst + dstofss;
    }

    /* Copy 128-byte blocks */
    cne_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
    bits = n;
    n    = n & 127;
    bits -= n;
    src = (const uint8_t *)src + bits;
    dst = (uint8_t *)dst + bits;

    /* Copy whatever is left */
    goto COPY_BLOCK_128_BACK31;
}

#else /* fall back to the SSE implementation */

#define ALIGNMENT_MASK 0x0F
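
/*
 * SSE fallback below; all wide moves are built from 16-byte loads/stores
 * and the aligned path is taken when both pointers are 16-byte aligned.
 */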
/* Copy 16 bytes from one location to another; locations must not overlap. */
static __cne_always_inline void
cne_mov16(uint8_t *dst, const uint8_t *src)
{
    __m128i xmm0;

    xmm0 = _mm_loadu_si128((const __m128i *)src);
    _mm_storeu_si128((__m128i *)dst, xmm0);
}

/* Copy 32 bytes as two 16-byte moves; locations must not overlap. */
static __cne_always_inline void
cne_mov32(uint8_t *dst, const uint8_t *src)
{
    cne_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
    cne_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
}

/* Copy 64 bytes as four 16-byte moves; locations must not overlap. */
static __cne_always_inline void
cne_mov64(uint8_t *dst, const uint8_t *src)
{
    cne_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
    cne_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
    cne_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
    cne_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
}
/* Copy 128 bytes as eight 16-byte moves; locations must not overlap. */
static __cne_always_inline void
cne_mov128(uint8_t *dst, const uint8_t *src)
{
    cne_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
    cne_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
    cne_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
    cne_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
    cne_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
    cne_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
    cne_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
    cne_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
}
/* Copy 256 bytes as sixteen 16-byte moves; locations must not overlap. */
static __cne_always_inline void
cne_mov256(uint8_t *dst, const uint8_t *src)
{
    cne_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
    cne_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
    cne_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
    cne_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
    cne_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
    cne_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
    cne_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
    cne_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
    cne_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
    cne_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
    cne_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
    cne_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
    cne_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
    cne_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
    cne_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
    cne_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
}
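
/*
 * The macro below copies an unaligned block with a constant load offset
 * (at most 47 bytes of leftover). _mm_alignr_epi8() (PALIGNR) requires an
 * immediate shift count, so <offset> must be a compile-time constant in
 * [1, 15]; MOVEUNALIGNED_LEFT47() further down expands one instance per
 * possible offset. The caller must predefine __m128i xmm0 through xmm8,
 * the store side must be 16-byte aligned, and <src> must allow <offset>
 * bytes of backwards access.
 */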
#define MOVEUNALIGNED_LEFT47_IMM(dst, src, len, offset)                                            \
    __extension__({                                                                               \
        size_t tmp;                                                                               \
        while (len >= 128 + 16 - offset) {                                                        \
            xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16));    \
            len -= 128;                                                                           \
            xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16));    \
            xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16));    \
            xmm3 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 3 * 16));    \
            xmm4 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 4 * 16));    \
            xmm5 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 5 * 16));    \
            xmm6 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 6 * 16));    \
            xmm7 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 7 * 16));    \
            xmm8 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 8 * 16));    \
            src  = (const uint8_t *)src + 128;                                                    \
            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset)); \
            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset)); \
            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset)); \
            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset)); \
            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset)); \
            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset)); \
            dst = (uint8_t *)dst + 128;                                                           \
        }                                                                                         \
        tmp = len;                                                                                \
        len = ((len - 16 + offset) & 127) + 16 - offset;                                          \
        tmp -= len;                                                                               \
        src = (const uint8_t *)src + tmp;                                                         \
        dst = (uint8_t *)dst + tmp;                                                               \
        if (len >= 32 + 16 - offset) {                                                            \
            while (len >= 32 + 16 - offset) {                                                     \
                xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
                len -= 32;                                                                        \
                xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16)); \
                xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16)); \
                src  = (const uint8_t *)src + 32;                                                 \
                _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
                _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
                dst = (uint8_t *)dst + 32;                                                        \
            }                                                                                     \
            tmp = len;                                                                            \
            len = ((len - 16 + offset) & 31) + 16 - offset;                                       \
            tmp -= len;                                                                           \
            src = (const uint8_t *)src + tmp;                                                     \
            dst = (uint8_t *)dst + tmp;                                                           \
        }                                                                                         \
    })
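
/*
 * Dispatch to the constant-offset instance matching the runtime offset.
 * Note the cases pass the enclosing function's <n> as the running length,
 * so this macro is only usable inside cne_pktcpy_generic().
 */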
#define MOVEUNALIGNED_LEFT47(dst, src, len, offset)                        \
    __extension__({                                                       \
        switch (offset) {                                                 \
        case 0x01: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x01); break;    \
        case 0x02: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x02); break;    \
        case 0x03: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x03); break;    \
        case 0x04: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x04); break;    \
        case 0x05: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x05); break;    \
        case 0x06: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x06); break;    \
        case 0x07: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x07); break;    \
        case 0x08: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x08); break;    \
        case 0x09: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x09); break;    \
        case 0x0A: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0A); break;    \
        case 0x0B: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0B); break;    \
        case 0x0C: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0C); break;    \
        case 0x0D: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0D); break;    \
        case 0x0E: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0E); break;    \
        case 0x0F: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0F); break;    \
        default:;                                                         \
        }                                                                 \
    })
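
/*
 * cne_pktcpy_generic() for SSE: small sizes use the scalar ladder and
 * overlapping 16/32-byte moves; larger sizes align the destination, then
 * either stream 256-byte blocks (source also aligned) or fall back to the
 * PALIGNR-based unaligned loop above.
 */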
static __cne_always_inline void *
cne_pktcpy_generic(void *dst, const void *src, size_t n)
{
    __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
    uintptr_t dstu = (uintptr_t)dst;
    uintptr_t srcu = (uintptr_t)src;
    void *ret      = dst;
    size_t dstofss;
    size_t srcofs;

    /* Copy less than 16 bytes: one store per set bit of n */
    if (n < 16) {
        if (n & 0x01) {
            *(uint8_t *)dstu = *(const uint8_t *)srcu;
            srcu             = (uintptr_t)((const uint8_t *)srcu + 1);
            dstu             = (uintptr_t)((uint8_t *)dstu + 1);
        }
        if (n & 0x02) {
            *(uint16_t *)dstu = *(const uint16_t *)srcu;
            srcu              = (uintptr_t)((const uint16_t *)srcu + 1);
            dstu              = (uintptr_t)((uint16_t *)dstu + 1);
        }
        if (n & 0x04) {
            *(uint32_t *)dstu = *(const uint32_t *)srcu;
            srcu              = (uintptr_t)((const uint32_t *)srcu + 1);
            dstu              = (uintptr_t)((uint32_t *)dstu + 1);
        }
        if (n & 0x08)
            *(uint64_t *)dstu = *(const uint64_t *)srcu;
        return ret;
    }

    /* Fast path when the copy size does not exceed 512 bytes */
    if (n <= 32) {
        cne_mov16((uint8_t *)dst, (const uint8_t *)src);
        cne_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
        return ret;
    }
    if (n <= 48) {
        cne_mov32((uint8_t *)dst, (const uint8_t *)src);
        cne_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
        return ret;
    }
    if (n <= 64) {
        cne_mov32((uint8_t *)dst, (const uint8_t *)src);
        cne_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
        cne_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
        return ret;
    }
    if (n <= 128)
        goto COPY_BLOCK_128_BACK15;
    if (n <= 512) {
        if (n >= 256) {
            n -= 256;
            cne_mov128((uint8_t *)dst, (const uint8_t *)src);
            cne_mov128((uint8_t *)dst + 128, (const uint8_t *)src + 128);
            src = (const uint8_t *)src + 256;
            dst = (uint8_t *)dst + 256;
        }
COPY_BLOCK_255_BACK15:
        if (n >= 128) {
            n -= 128;
            cne_mov128((uint8_t *)dst, (const uint8_t *)src);
            src = (const uint8_t *)src + 128;
            dst = (uint8_t *)dst + 128;
        }
COPY_BLOCK_128_BACK15:
        if (n >= 64) {
            n -= 64;
            cne_mov64((uint8_t *)dst, (const uint8_t *)src);
            src = (const uint8_t *)src + 64;
            dst = (uint8_t *)dst + 64;
        }
COPY_BLOCK_64_BACK15:
        if (n >= 32) {
            n -= 32;
            cne_mov32((uint8_t *)dst, (const uint8_t *)src);
            src = (const uint8_t *)src + 32;
            dst = (uint8_t *)dst + 32;
        }
        if (n > 16) {
            cne_mov16((uint8_t *)dst, (const uint8_t *)src);
            cne_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
            return ret;
        }
        if (n > 0)
            cne_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
        return ret;
    }

    /*
     * Make the stores aligned when the copy size exceeds 512 bytes, and
     * make sure the first 15 bytes are copied, because the unaligned copy
     * loop needs up to 15 bytes of backwards access.
     */
    dstofss = (uintptr_t)dst & 0x0F;
    if (dstofss > 0) {
        dstofss = 16 - dstofss + 16;
        n -= dstofss;
        cne_mov32((uint8_t *)dst, (const uint8_t *)src);
        src = (const uint8_t *)src + dstofss;
        dst = (uint8_t *)dst + dstofss;
    }
    srcofs = ((uintptr_t)src & 0x0F);

    /* For aligned copy: stream 256-byte blocks */
    if (srcofs == 0) {
        for (; n >= 256; n -= 256) {
            cne_mov256((uint8_t *)dst, (const uint8_t *)src);
            dst = (uint8_t *)dst + 256;
            src = (const uint8_t *)src + 256;
        }

        /* Copy whatever is left */
        goto COPY_BLOCK_255_BACK15;
    }

    /* For copy with unaligned load */
    MOVEUNALIGNED_LEFT47(dst, src, n, srcofs);

    /* Copy whatever is left */
    goto COPY_BLOCK_64_BACK15;
}

#endif /* CNE_MACHINE_CPUFLAG */
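
/*
 * cne_pktcpy_aligned(): copy when both pointers sit on an ALIGNMENT_MASK
 * boundary. Small sizes use the scalar ladder or overlapping 16/32-byte
 * moves; everything else is a 64-byte loop with one overlapping tail move.
 */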
static __cne_always_inline void *
cne_pktcpy_aligned(void *dst, const void *src, size_t n)
{
    void *ret = dst;

    /* Copy size <= 16 bytes */
    if (n < 16) {
        if (n & 0x01) {
            *(uint8_t *)dst = *(const uint8_t *)src;
            src             = (const uint8_t *)src + 1;
            dst             = (uint8_t *)dst + 1;
        }
        if (n & 0x02) {
            *(uint16_t *)dst = *(const uint16_t *)src;
            src              = (const uint16_t *)src + 1;
            dst              = (uint16_t *)dst + 1;
        }
        if (n & 0x04) {
            *(uint32_t *)dst = *(const uint32_t *)src;
            src              = (const uint32_t *)src + 1;
            dst              = (uint32_t *)dst + 1;
        }
        if (n & 0x08)
            *(uint64_t *)dst = *(const uint64_t *)src;

        return ret;
    }

    /* Copy 16 <= size <= 32 bytes */
    if (n <= 32) {
        cne_mov16((uint8_t *)dst, (const uint8_t *)src);
        cne_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
        return ret;
    }

    /* Copy 32 < size <= 64 bytes */
    if (n <= 64) {
        cne_mov32((uint8_t *)dst, (const uint8_t *)src);
        cne_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
        return ret;
    }

    /* Copy 64-byte blocks */
    for (; n >= 64; n -= 64) {
        cne_mov64((uint8_t *)dst, (const uint8_t *)src);
        dst = (uint8_t *)dst + 64;
        src = (const uint8_t *)src + 64;
    }

    /* Copy whatever is left */
    cne_mov64((uint8_t *)dst - 64 + n, (const uint8_t *)src - 64 + n);

    return ret;
}
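
/*
 * cne_pktcpy(): top-level dispatch; takes the aligned fast path only when
 * both src and dst are aligned to ALIGNMENT_MASK + 1 bytes.
 */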
static __cne_always_inline void *
cne_pktcpy(void *dst, const void *src, size_t n)
{
    if (!(((uintptr_t)dst | (uintptr_t)src) & ALIGNMENT_MASK))
        return cne_pktcpy_aligned(dst, src, n);
    else
        return cne_pktcpy_generic(dst, src, n);
}
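
/*
 * Usage sketch (hypothetical caller, not part of this header):
 *
 *     uint8_t pkt_out[2048];
 *     const uint8_t *pkt_in = ...;   // e.g. a packet buffer data pointer
 *     cne_pktcpy(pkt_out, pkt_in, 1500);
 *
 * Like memcpy(), cne_pktcpy() returns dst and the buffers must not overlap.
 */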