Lines matching refs:offset — cross-reference hits for the config tokenizer's offset field: the struct member itself (line 506) plus every read and advance of it in config_skip_newline(), config_skip_comment(), config_tokenizer(), and tokenizer_init().
506 size_t offset; member
536 t->offset = 0;
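
These first two hits show the tokenizer's state: offset is a struct member that gets reset to 0 before a scan. A minimal sketch of the state these matches imply — only input, size, and offset are visible in the listing (input is evidently NUL-terminated, given the truthiness tests below), so the real struct certainly carries more fields:

#include <stddef.h>

typedef struct {
    const char *input;  /* whole config source, NUL-terminated */
    size_t size;        /* byte length of input */
    size_t offset;      /* current scan position, advanced by every hit below */
} tokenizer_sketch_t;

The later sketches reuse this type.
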
556 assert(t->input[t->offset] == '\r' || t->input[t->offset] == '\n'); in config_skip_newline()
557 if (t->input[t->offset] == '\r' && t->input[t->offset + 1] == '\n') { in config_skip_newline()
559 t->offset ++; in config_skip_newline()
561 t->offset ++; in config_skip_newline()
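
Lines 556–561 are config_skip_newline(): a lone '\n' (or '\r') advances offset by one, a "\r\n" pair by two. A hedged reimplementation of just the offset arithmetic (the real function may also report how much it skipped; that part is not shown in the listing):

#include <assert.h>

static void skip_newline_sketch(tokenizer_sketch_t *t) {
    assert(t->input[t->offset] == '\r' || t->input[t->offset] == '\n');
    if (t->input[t->offset] == '\r' && t->input[t->offset + 1] == '\n')
        t->offset++;   /* consume the '\r' of a CRLF pair */
    t->offset++;       /* consume the '\n' (or a lone '\r') */
}
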
567 assert(t->input[t->offset] == '#'); in config_skip_comment()
568 for (i = 1; t->input[t->offset + i] && in config_skip_comment()
569 (t->input[t->offset + i] != '\n' && t->input[t->offset + i] != '\r'); in config_skip_comment()
571 t->offset += i; in config_skip_comment()
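
Lines 567–571 are config_skip_comment(): after a '#', scan to the end of the line (or end of input) and jump offset past the comment body, leaving the newline itself for the newline handler above. A sketch, reusing tokenizer_sketch_t:

static void skip_comment_sketch(tokenizer_sketch_t *t) {
    size_t i;
    assert(t->input[t->offset] == '#');
    for (i = 1; t->input[t->offset + i] != '\0'
             && t->input[t->offset + i] != '\n'
             && t->input[t->offset + i] != '\r'; i++) ;
    t->offset += i;   /* offset now sits on the newline (or the NUL) */
}
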
579 for (tid = 0; tid == 0 && t->offset < t->size && t->input[t->offset] ; ) { in config_tokenizer()
580 char c = t->input[t->offset]; in config_tokenizer()
586 if (t->input[t->offset + 1] == '>') { in config_tokenizer()
587 t->offset += 2; in config_tokenizer()
600 if (t->input[t->offset + 1] == '=') { in config_tokenizer()
601 t->offset += 2; in config_tokenizer()
606 } else if (t->input[t->offset + 1] == '~') { in config_tokenizer()
607 t->offset += 2; in config_tokenizer()
624 buffer_copy_string_len(token, t->input + t->offset, 1); in config_tokenizer()
626 t->offset++; in config_tokenizer()
639 if (t->input[t->offset + 1] == '=') { in config_tokenizer()
640 t->offset += 2; in config_tokenizer()
645 } else if (t->input[t->offset + 1] == '~') { in config_tokenizer()
646 t->offset += 2; in config_tokenizer()
671 t->offset++; in config_tokenizer()
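
Lines 586–671 show the two-character operator handling: on '=' (and, at lines 639–646, analogously on '!') the tokenizer peeks one byte ahead, consuming two bytes for "=>", "==", "=~" (resp. "!=", "!~") and otherwise falling back to a single-character token copied as-is (lines 624–626). A sketch of the '=' family; the token-id names here are invented for illustration and the grouping into one switch is an assumption:

enum sketch_tid { TK_ASSIGN, TK_ARRAY_ASSIGN, TK_EQ, TK_MATCH };

static enum sketch_tid scan_eq_sketch(tokenizer_sketch_t *t) {
    switch (t->input[t->offset + 1]) {   /* safe: input is NUL-terminated */
    case '>': t->offset += 2; return TK_ARRAY_ASSIGN;  /* "=>" */
    case '=': t->offset += 2; return TK_EQ;            /* "==" */
    case '~': t->offset += 2; return TK_MATCH;         /* "=~" */
    default:  t->offset += 1; return TK_ASSIGN;        /* plain "=" */
    }
}
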
678 while (!done && t->offset < t->size) { in config_tokenizer()
679 switch (t->input[t->offset]) { in config_tokenizer()
693 t->offset++; in config_tokenizer()
717 t->offset++; in config_tokenizer()
722 start = t->input + t->offset + 1; in config_tokenizer()
725 for (i = 1; t->input[t->offset + i]; i++) { in config_tokenizer()
726 if (t->input[t->offset + i] == '\\' && in config_tokenizer()
727 t->input[t->offset + i + 1] == '"') { in config_tokenizer()
729 buffer_append_string_len(token, start, t->input + t->offset + i - start); in config_tokenizer()
731 start = t->input + t->offset + i + 1; in config_tokenizer()
739 if (t->input[t->offset + i] == '"') { in config_tokenizer()
742 buffer_append_string_len(token, start, t->input + t->offset + i - start); in config_tokenizer()
748 if (t->input[t->offset + i] == '\0') { in config_tokenizer()
759 t->offset += i + 1; in config_tokenizer()
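
Lines 722–759 scan a quoted string: start points just past the opening '"'; on a '\\' followed by '"' the segment up to the backslash is flushed and start restarts at the quote itself, so the escape collapses to a bare '"'; a '\0' before the closing quote (line 748) means the literal is unterminated; on success offset jumps past the closing quote (line 759). The real code appends into lighttpd's buffer API (buffer_append_string_len); this sketch writes into a caller-supplied array instead, and the extra step over the escaped quote (the i++) is inferred rather than visible in the listing:

#include <string.h>

/* out must be at least as large as the raw literal; returns the
 * unescaped length, or (size_t)-1 on an unterminated string. */
static size_t scan_string_sketch(tokenizer_sketch_t *t, char *out) {
    const char *start = t->input + t->offset + 1;   /* skip opening '"' */
    size_t i, n = 0;
    for (i = 1; t->input[t->offset + i]; i++) {
        if (t->input[t->offset + i] == '\\' &&
            t->input[t->offset + i + 1] == '"') {
            size_t seg = (size_t)(t->input + t->offset + i - start);
            memcpy(out + n, start, seg);
            n += seg;
            start = t->input + t->offset + i + 1;   /* restart at the '"' itself */
            i++;                                    /* step over the escaped quote */
            continue;
        }
        if (t->input[t->offset + i] == '"') {
            size_t seg = (size_t)(t->input + t->offset + i - start);
            memcpy(out + n, start, seg);
            n += seg;
            t->offset += i + 1;                     /* past the closing '"' */
            return n;
        }
    }
    return (size_t)-1;   /* hit '\0' first: unterminated literal */
}
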
764 t->offset++; in config_tokenizer()
772 t->offset++; in config_tokenizer()
780 t->offset++; in config_tokenizer()
791 if (t->input[t->offset + 1] == '=') { in config_tokenizer()
792 t->offset += 2; in config_tokenizer()
796 t->offset++; in config_tokenizer()
803 t->offset++; in config_tokenizer()
812 t->offset++; in config_tokenizer()
821 t->offset++; in config_tokenizer()
830 t->offset++; in config_tokenizer()
843 for (i = 0; t->input[t->offset + i] && in config_tokenizer()
844 (isalpha((unsigned char)t->input[t->offset + i]) in config_tokenizer()
847 if (i && t->input[t->offset + i]) { in config_tokenizer()
849 buffer_copy_string_len(token, t->input + t->offset, i); in config_tokenizer()
851 t->offset += i; in config_tokenizer()
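
Lines 843–851 use the tokenizer's measure-then-jump idiom: count how many bytes satisfy a character class, copy exactly that many out as the token text, then advance offset by the count in a single step. A sketch for the alphabetic run; note the original (line 847) additionally rejects a run that stops only because the input ended:

#include <ctype.h>

static size_t scan_alpha_run_sketch(tokenizer_sketch_t *t, char *out, size_t outsz) {
    size_t i;
    for (i = 0; t->input[t->offset + i]
             && isalpha((unsigned char)t->input[t->offset + i]); i++) ;
    if (i == 0 || i + 1 > outsz) return 0;   /* no run, or caller buffer too small */
    memcpy(out, t->input + t->offset, i);
    out[i] = '\0';
    t->offset += i;
    return i;
}
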
863 for (i = 0; t->input[t->offset + i] && isdigit((unsigned char)t->input[t->offset + i]); i++); in config_tokenizer()
869 buffer_copy_string_len(token, t->input + t->offset, i); in config_tokenizer()
871 t->offset += i; in config_tokenizer()
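
Lines 863–871 apply the same idiom to digit runs. The original copies the digits out as token text; this sketch converts them to a value instead, purely to show the offset arithmetic (no overflow handling — illustration only):

static long scan_int_sketch(tokenizer_sketch_t *t) {
    size_t i;
    long v = 0;
    for (i = 0; t->input[t->offset + i]
             && isdigit((unsigned char)t->input[t->offset + i]); i++)
        v = v * 10 + (t->input[t->offset + i] - '0');
    t->offset += i;   /* jump past the whole digit run at once */
    return v;
}
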
876 for (i = 0; t->input[t->offset + i] && in config_tokenizer()
877 (isalnum((unsigned char)t->input[t->offset + i]) || in config_tokenizer()
878 t->input[t->offset + i] == '.' || in config_tokenizer()
879 t->input[t->offset + i] == '_' || /* for env.* */ in config_tokenizer()
880 t->input[t->offset + i] == '-' in config_tokenizer()
883 if (i && t->input[t->offset + i]) { in config_tokenizer()
884 buffer_copy_string_len(token, t->input + t->offset, i); in config_tokenizer()
898 t->offset += i; in config_tokenizer()
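
Lines 876–898 scan configuration keys: the character class widens from plain alnum to also allow '.', '_' and '-', so dotted names (the "for env.*" comment in the source points at keys under env.*) tokenize as a single unit. The class as a hedged predicate:

static int is_key_char_sketch(char c) {
    return isalnum((unsigned char)c)
        || c == '.'    /* dotted keys, e.g. env.* */
        || c == '_'
        || c == '-';
}
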
923 } else if (t->offset < t->size) { in config_tokenizer()
977 t->offset = 0; in tokenizer_init()
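
Lines 579, 923, and 977 frame the whole scan: tokenizer_init() resets offset to 0, the main loop runs while offset < size and the current byte is non-NUL, and offset < size after the loop (line 923) appears to mean the tokenizer stopped on a byte it could not classify — an error path rather than normal end of input. A driver sketch under those assumptions:

static void tokenize_all_sketch(tokenizer_sketch_t *t, const char *input, size_t size) {
    t->input = input;
    t->size  = size;
    t->offset = 0;
    while (t->offset < t->size && t->input[t->offset]) {
        /* dispatch on t->input[t->offset] as config_tokenizer() does;
         * every case must advance t->offset or the loop would spin */
        t->offset++;   /* placeholder advance so the sketch terminates */
    }
    /* here, t->offset < t->size would signal an unrecognized byte */
}
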