Example source code for the Java class javax.swing.text.Segment
Project: incubator-netbeans
File: SyntaxSupport.java
/** Initialize the syntax so it's ready to scan the given area.
* @param syntax lexical analyzer to prepare
* @param startPos starting position of the scanning
* @param endPos ending position of the scanning
* @param forceLastBuffer force the syntax to think that the scanned area is the last
* in the document. This is useful for forcing the syntax to process all the characters
* in the given area.
* @param forceNotLastBuffer force the syntax to think that the scanned area is NOT
* the last buffer in the document. This is useful when the syntax will continue
* scanning on another buffer.
*/
public void initSyntax(Syntax syntax, int startPos, int endPos,
boolean forceLastBuffer, boolean forceNotLastBuffer)
throws BadLocationException {
doc.readLock();
try {
Segment text = new Segment();
int docLen = doc.getLength();
doc.prepareSyntax(text, syntax, startPos, 0, forceLastBuffer, forceNotLastBuffer);
int preScan = syntax.getPreScan();
char[] buffer = doc.getChars(startPos - preScan, endPos - startPos + preScan);
boolean lastBuffer = forceNotLastBuffer ? false
: (forceLastBuffer || (endPos == docLen));
syntax.relocate(buffer, preScan, endPos - startPos, lastBuffer, endPos);
} finally {
doc.readUnlock();
}
}
Project: incubator-netbeans
File: DocumentContent.java
public void getChars(int offset, int length, Segment chars)
throws BadLocationException {
checkBounds(offset, length, length());
if ((offset + length) <= gapStart) { // completely below gap
chars.array = charArray;
chars.offset = offset;
} else if (offset >= gapStart) { // completely above gap
chars.array = charArray;
chars.offset = offset + gapLength;
} else { // spans the gap, must copy
chars.array = copySpanChars(offset, length);
chars.offset = 0;
}
chars.count = length;
}
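Callers of a getChars/getText implementation like the one above normally read straight out of the returned Segment's array/offset/count fields, with no extra copying. Below is a minimal stand-alone sketch against a plain Swing PlainDocument; class and variable names are illustrative only.

import javax.swing.text.BadLocationException;
import javax.swing.text.Document;
import javax.swing.text.PlainDocument;
import javax.swing.text.Segment;

public class SegmentReadDemo {
    public static void main(String[] args) throws BadLocationException {
        Document doc = new PlainDocument();
        doc.insertString(0, "The quick brown fox", null);

        // Fill the segment with 5 characters starting at offset 4 ("quick").
        Segment seg = new Segment();
        doc.getText(4, 5, seg);

        // The characters live in seg.array between seg.offset and seg.offset + seg.count.
        for (int i = seg.offset; i < seg.offset + seg.count; i++) {
            System.out.print(seg.array[i]);
        }
        System.out.println();
    }
}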
Project: incubator-netbeans
File: OverviewControllerUI.java
@Override
public void getText(int offset, int length, Segment txt) throws BadLocationException {
if (lastOffset == offset && lastLength == length) {
txt.array = segArray;
txt.offset = segOffset;
txt.count = segCount;
txt.setPartialReturn(segPartialReturn);
return;
}
super.getText(offset, length, txt);
if (length > CACHE_BOUNDARY || lastLength <= CACHE_BOUNDARY) {
segArray = txt.array;
segOffset = txt.offset;
segCount = txt.count;
segPartialReturn = txt.isPartialReturn();
lastOffset = offset;
lastLength = length;
}
}
Project: incubator-netbeans
File: ThreadDumpWindow.java
@Override
public void getText(int offset, int length, Segment txt) throws BadLocationException {
if (lastOffset == offset && lastLength == length) {
txt.array = segArray;
txt.offset = segOffset;
txt.count = segCount;
txt.setPartialReturn(segPartialReturn);
return;
}
super.getText(offset, length, txt);
if (length > CACHE_BOUNDARY || lastLength <= CACHE_BOUNDARY) {
segArray = txt.array;
segOffset = txt.offset;
segCount = txt.count;
segPartialReturn = txt.isPartialReturn();
lastOffset = offset;
lastLength = length;
}
}
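The cached segPartialReturn flag above preserves Segment's partial-return protocol: with setPartialReturn(true), getText may hand back fewer characters than requested (so it can avoid copying across the document's internal gap), and the caller is expected to loop. Below is a hedged stand-alone sketch using a plain PlainDocument; the exact chunking depends on where the document's gap happens to sit.

import javax.swing.text.BadLocationException;
import javax.swing.text.Document;
import javax.swing.text.PlainDocument;
import javax.swing.text.Segment;

public class PartialReturnDemo {
    public static void main(String[] args) throws BadLocationException {
        Document doc = new PlainDocument();
        doc.insertString(0, "alpha beta gamma delta", null);
        doc.insertString(11, "GAMMA ", null); // an edit, so the content likely spans a gap

        Segment seg = new Segment();
        seg.setPartialReturn(true); // allow getText to return only part of the range
        int offset = 0;
        int remaining = doc.getLength();
        while (remaining > 0) {
            doc.getText(offset, remaining, seg); // seg.count may be smaller than 'remaining'
            System.out.println("chunk: '" + seg + "' (" + seg.count + " chars)");
            offset += seg.count;
            remaining -= seg.count;
        }
    }
}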
Project: Yass
File: DocumentWordTokenizer.java
/**
* Creates a new DocumentWordTokenizer to work on a document
*
* @param document The document to spell check
*/
public DocumentWordTokenizer(Document document) {
this.document = document;
//Create a text segment over the entire document
text = new Segment();
sentenceIterator = BreakIterator.getSentenceInstance();
try {
document.getText(0, document.getLength(), text);
sentenceIterator.setText(text);
currentWordPos = getNextWordStart(text, 0);
//If the current word pos is -1 then the string was all white space
if (currentWordPos != -1) {
currentWordEnd = getNextWordEnd(text, currentWordPos);
nextWordPos = getNextWordStart(text, currentWordEnd);
}
else {
moreTokens = false;
}
}
catch (BadLocationException ex) {
moreTokens = false;
}
}
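This constructor works because Segment implements java.text.CharacterIterator, which is what lets a BreakIterator walk document text without first copying it into a String. Below is a minimal stand-alone sketch of that idea, using a word iterator instead of the sentence iterator above; names are illustrative.

import java.text.BreakIterator;
import javax.swing.text.Segment;

public class SegmentWordIteration {
    public static void main(String[] args) {
        char[] chars = "Hello Segment world".toCharArray();
        Segment seg = new Segment(chars, 0, chars.length);

        BreakIterator words = BreakIterator.getWordInstance();
        words.setText(seg); // Segment is a CharacterIterator
        int start = words.first();
        for (int end = words.next(); end != BreakIterator.DONE;
                start = end, end = words.next()) {
            if (Character.isLetterOrDigit(chars[start])) {
                System.out.println(new String(chars, start, end - start));
            }
        }
    }
}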
Project: rapidminer
File: SyntaxUtilities.java
/**
* Checks if a subregion of a <code>Segment</code> is equal to a string.
*
* @param ignoreCase
* True if case should be ignored, false otherwise
* @param text
* The segment
* @param offset
* The offset into the segment
* @param match
* The string to match
*/
public static boolean regionMatches(boolean ignoreCase, Segment text, int offset, String match) {
int length = offset + match.length();
char[] textArray = text.array;
if (length > text.offset + text.count) {
return false;
}
for (int i = offset, j = 0; i < length; i++, j++) {
char c1 = textArray[i];
char c2 = match.charAt(j);
if (ignoreCase) {
c1 = Character.toUpperCase(c1);
c2 = Character.toUpperCase(c2);
}
if (c1 != c2) {
return false;
}
}
return true;
}
Project: rapidminer
File: SyntaxUtilities.java
/**
* Checks if a subregion of a <code>Segment</code> is equal to a character array.
*
* @param ignoreCase
* True if case should be ignored, false otherwise
* @param text
* The segment
* @param offset
* The offset into the segment
* @param match
* The character array to match
*/
public static boolean regionMatches(boolean ignoreCase, Segment text, int offset, char[] match) {
int length = offset + match.length;
char[] textArray = text.array;
if (length > text.offset + text.count) {
return false;
}
for (int i = offset, j = 0; i < length; i++, j++) {
char c1 = textArray[i];
char c2 = match[j];
if (ignoreCase) {
c1 = Character.toUpperCase(c1);
c2 = Character.toUpperCase(c2);
}
if (c1 != c2) {
return false;
}
}
return true;
}
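A hypothetical call site for the two regionMatches helpers above, building a Segment directly over a char array; it assumes the SyntaxUtilities class shown here is on the classpath.

import javax.swing.text.Segment;

public class RegionMatchesDemo {
    public static void main(String[] args) {
        char[] buf = "for (int i = 0;".toCharArray();
        Segment line = new Segment(buf, 0, buf.length);

        System.out.println(SyntaxUtilities.regionMatches(false, line, 0, "for"));             // true
        System.out.println(SyntaxUtilities.regionMatches(true, line, 0, "FOR"));              // true, case ignored
        System.out.println(SyntaxUtilities.regionMatches(true, line, 0, "if".toCharArray())); // false
    }
}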
Project: rapidminer
File: PatchTokenMarker.java
@Override
public byte markTokensImpl(byte token, Segment line, int lineIndex) {
if (line.count == 0) {
return Token.NULL;
}
switch (line.array[line.offset]) {
case '+':
case '>':
addToken(line.count, Token.KEYWORD1);
break;
case '-':
case '<':
addToken(line.count, Token.KEYWORD2);
break;
case '@':
case '*':
addToken(line.count, Token.KEYWORD3);
break;
default:
addToken(line.count, Token.NULL);
break;
}
return Token.NULL;
}
Project: whackpad
File: ConsoleTextArea.java
synchronized void returnPressed() {
Document doc = getDocument();
int len = doc.getLength();
Segment segment = new Segment();
try {
doc.getText(outputMark, len - outputMark, segment);
} catch(javax.swing.text.BadLocationException ignored) {
ignored.printStackTrace();
}
if(segment.count > 0) {
history.add(segment.toString());
}
historyIndex = history.size();
inPipe.write(segment.array, segment.offset, segment.count);
append("\n");
outputMark = doc.getLength();
inPipe.write("\n");
inPipe.flush();
console1.flush();
}
Project: powertext
File: DocumentWordTokenizer.java
/**
* Creates a new DocumentWordTokenizer to work on a document
* @param document The document to spell check
*/
public DocumentWordTokenizer(Document document) {
this.document = document;
//Create a text segment over the entire document
text = new Segment();
sentenceIterator = BreakIterator.getSentenceInstance();
try {
document.getText(0, document.getLength(), text);
sentenceIterator.setText(text);
// robert: use text.getBeginIndex(), not 0, for segment's first offset
currentWordPos = getNextWordStart(text, text.getBeginIndex());
//If the current word pos is -1 then the string was all white space
if (currentWordPos != -1) {
currentWordEnd = getNextWordEnd(text, currentWordPos);
nextWordPos = getNextWordStart(text, currentWordEnd);
} else {
moreTokens = false;
}
} catch (BadLocationException ex) {
moreTokens = false;
}
}
Project: powertext
File: DocumentWordTokenizer.java
/**
* Sets the current word position at the start of the word containing
* the char at position pos. This way a call to nextWord() will return
* this word.
*
* @param pos position in the word we want to set as current.
*/
public void posStartFullWordFrom(int pos){
currentWordPos=text.getBeginIndex();
if(pos>text.getEndIndex())
pos=text.getEndIndex();
for (char ch = text.setIndex(pos); ch != Segment.DONE; ch = text.previous()) {
if (!Character.isLetterOrDigit(ch)) {
if (ch == '-' || ch == '\'') { // handle ' and - inside words
char ch2 = text.previous();
text.next();
if (ch2 != Segment.DONE && Character.isLetterOrDigit(ch2))
continue;
}
currentWordPos=text.getIndex()+1;
break;
}
}
//System.out.println("CurPos:"+currentWordPos);
if(currentWordPos==0)
first=true;
moreTokens=true;
currentWordEnd = getNextWordEnd(text, currentWordPos);
nextWordPos = getNextWordStart(text, currentWordEnd + 1);
}
Project: openjdk-jdk10
File: bug8134721.java
private static void testNPE() {
Graphics g = null;
try {
String test = "\ttest\ttest2";
BufferedImage buffImage = new BufferedImage(
100, 100, BufferedImage.TYPE_INT_RGB);
g = buffImage.createGraphics();
Segment segment = new Segment(test.toCharArray(), 0, test.length());
Utilities.drawTabbedText(segment, 0, 0, g, null, 0);
} finally {
if (g != null) {
g.dispose();
}
}
}
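Besides the char-array constructor used in this test, Segment has also implemented CharSequence since Java 6, so the same character data can be fed to APIs such as java.util.regex without copying. A small illustrative sketch:

import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.swing.text.Segment;

public class SegmentAsCharSequence {
    public static void main(String[] args) {
        char[] chars = "\ttest\ttest2".toCharArray();
        Segment segment = new Segment(chars, 0, chars.length);

        // Matcher accepts any CharSequence, including a Segment.
        Matcher m = Pattern.compile("test\\d?").matcher(segment);
        while (m.find()) {
            System.out.println(m.group() + " at index " + m.start());
        }
    }
}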
Project: powertext
File: JsonTokenMaker.java
/**
* Returns the first token in the linked list of tokens generated
* from <code>text</code>. This method must be implemented by
* subclasses so they can correctly implement syntax highlighting.
*
* @param text The text from which to get tokens.
* @param initialTokenType The token type we should start with.
* @param startOffset The offset into the document at which
* <code>text</code> starts.
* @return The first <code>Token</code> in a linked list representing
* the syntax highlighted text.
*/
@Override
public Token getTokenList(Segment text, int initialTokenType, int startOffset) {
resetTokenList();
this.offsetShift = -text.offset + startOffset;
// Start off in the proper state.
int state = YYINITIAL;
start = text.offset;
s = text;
try {
yyreset(zzReader);
yybegin(state);
return yylex();
} catch (IOException ioe) {
ioe.printStackTrace();
return new TokenImpl();
}
}
Project: powertext
File: DockerTokenMaker.java
/**
* Returns the first token in the linked list of tokens generated
* from <code>text</code>. This method must be implemented by
* subclasses so they can correctly implement syntax highlighting.
*
* @param text The text from which to get tokens.
* @param initialTokenType The token type we should start with.
* @param startOffset The offset into the document at which
* <code>text</code> starts.
* @return The first <code>Token</code> in a linked list representing
* the syntax highlighted text.
*/
@Override
public Token getTokenList(Segment text, int initialTokenType, int startOffset) {
resetTokenList();
this.offsetShift = -text.offset + startOffset;
// Start off in the proper state.
int state = TokenTypes.NULL;
s = text;
try {
yyreset(zzReader);
yybegin(state);
return yylex();
} catch (IOException ioe) {
ioe.printStackTrace();
return new TokenImpl();
}
}
Project: powertext
File: AssemblerX86TokenMaker.java
/**
* Returns the first token in the linked list of tokens generated
* from <code>text</code>. This method must be implemented by
* subclasses so they can correctly implement syntax highlighting.
*
* @param text The text from which to get tokens.
* @param initialTokenType The token type we should start with.
* @param startOffset The offset into the document at which
* <code>text</code> starts.
* @return The first <code>Token</code> in a linked list representing
* the syntax highlighted text.
*/
@Override
public Token getTokenList(Segment text, int initialTokenType, int startOffset) {
resetTokenList();
this.offsetShift = -text.offset + startOffset;
// Start off in the proper state.
int state = Token.NULL;
switch (initialTokenType) {
default:
state = Token.NULL;
}
s = text;
try {
yyreset(zzReader);
yybegin(state);
return yylex();
} catch (IOException ioe) {
ioe.printStackTrace();
return new TokenImpl();
}
}
Project: powertext
File: RSyntaxDocument.java
/**
* Deserializes a document.
*
* @param in The stream to read from.
* @throws ClassNotFoundException
* @throws IOException
*/
private void readObject(ObjectInputStream in)
throws ClassNotFoundException, IOException {
in.defaultReadObject();
// Install default TokenMakerFactory. To support custom TokenMakers,
// both JVMs should install default TokenMakerFactories that support
// the language they want to use beforehand.
setTokenMakerFactory(null);
// Handle other transient stuff
this.s = new Segment();
int lineCount = getDefaultRootElement().getElementCount();
lastTokensOnLines = new DynamicIntArray(lineCount);
setSyntaxStyle(syntaxStyle); // Actually install (transient) TokenMaker
}
Project: incubator-netbeans
File: LineSeparatorConversion.java
public Segment nextConverted() throws IOException {
readWholeBuffer = false;
if (reader == null) { // no more chars to read
return null;
}
int readOffset = 0;
int readSize = readBuffer(reader, convertedText.array, readOffset, true);
if (readSize == 0) { // no more chars in reader
reader.close();
reader = null;
return null;
}
readWholeBuffer = (readSize == convertedText.array.length);
if (lastCharCR && readSize > 0 && convertedText.array[readOffset] == '\n') {
/* the preceding '\r' was already converted to '\n'
* in the previous buffer so here just skip initial '\n'
*/
readOffset++;
readSize--;
}
convertedText.offset = readOffset;
convertedText.count = readSize;
lastCharCR = convertSegmentToLineFeed(convertedText);
return convertedText;
}
Project: incubator-netbeans
File: LineSeparatorConversion.java
/**
* Convert all '\r\n' and '\r' occurrences to '\n' (linefeed).
* @param text the text to be converted. The text is converted in place,
* in the original array of the given segment.
* The <CODE>count</CODE> field
* of the text parameter may be reduced by the conversion
* if '\r\n' sequences are present.
* @return whether the last character in the text was '\r'.
* That character has already been converted to '\n' and is present
* in the segment. This notification is important because,
* if the next buffer starts with '\n', that character should be skipped.
*/
private static boolean convertSegmentToLineFeed(Segment text) {
char[] chars = text.array;
int storeOffset = text.offset; // offset at which chars are stored
int endOffset = storeOffset + text.count;
boolean storeChar = false; // to prevent copying same chars to same offsets
boolean lastCharCR = false; // whether last char was '\r'
for (int offset = storeOffset; offset < endOffset; offset++) {
char ch = chars[offset];
if (lastCharCR && ch == '\n') { // found CRLF sequence
lastCharCR = false;
storeChar = true; // storeOffset now differs from offset
} else { // not CRLF sequence
if (ch == '\r') {
lastCharCR = true;
chars[storeOffset++] = '\n'; // convert it to '\n'
} else if (ch == LS || ch == PS) { // Unicode LS, PS
lastCharCR = false;
chars[storeOffset++] = '\n';
} else { // current char not '\r'
lastCharCR = false;
if (storeChar) {
chars[storeOffset] = ch;
}
storeOffset++;
}
}
}
text.count = storeOffset - text.offset;
return lastCharCR;
}
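For reference, here is a self-contained sketch of the same in-place normalization; the logic is re-implemented inline only because the method above is private, and the Unicode LS/PS branch is omitted. It shows how the segment's count shrinks when '\r\n' pairs collapse to a single '\n'.

import javax.swing.text.Segment;

public class CrLfToLfDemo {
    public static void main(String[] args) {
        char[] chars = "one\r\ntwo\rthree\n".toCharArray();
        Segment text = new Segment(chars, 0, chars.length);

        int store = text.offset;            // offset at which converted chars are stored
        boolean lastCharCR = false;
        for (int i = text.offset; i < text.offset + text.count; i++) {
            char ch = text.array[i];
            if (lastCharCR && ch == '\n') { // second half of a CRLF pair: drop it
                lastCharCR = false;
            } else if (ch == '\r') {        // lone CR or first half of CRLF: store '\n'
                lastCharCR = true;
                text.array[store++] = '\n';
            } else {
                lastCharCR = false;
                text.array[store++] = ch;
            }
        }
        text.count = store - text.offset;   // count shrinks by one per CRLF pair
        System.out.println(text.toString().replace("\n", "\\n")); // one\ntwo\nthree\n
    }
}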
Project: powertext
File: WrappedSyntaxView.java
/**
* Makes a <code>Segment</code> point to the text in our
* document between the given positions. Note that the positions MUST be
* valid positions in the document.
*
* @param p0 The first position in the document.
* @param p1 The second position in the document.
* @param document The document from which you want to get the text.
* @param seg The segment in which to load the text.
*/
private void setSegment(int p0, int p1, Document document,
Segment seg) {
try {
//System.err.println("... in setSharedSegment, p0/p1==" + p0 + "/" + p1);
document.getText(p0, p1-p0, seg);
//System.err.println("... in setSharedSegment: s=='" + s + "'; line/numLines==" + line + "/" + numLines);
} catch (BadLocationException ble) { // Never happens
ble.printStackTrace();
}
}
Project: Yass
File: DocumentWordTokenizer.java
/**
* This helper method returns the end of the next word in the buffer.
*
* @param text the segment to scan
* @param startPos the position at which to start scanning
* @return the index just past the end of the word, or the segment's end index
*/
private static int getNextWordEnd(Segment text, int startPos) {
for (char ch = text.setIndex(startPos); ch != Segment.DONE; ch = text.next()) {
if (!Character.isLetterOrDigit(ch)) {
// changed by Saruta
if (ch == '-' || ch == '\'' || ch == '~') {
// handle ' and - inside words
continue;
}
return text.getIndex();
}
}
return text.getEndIndex();
}
Project: incubator-netbeans
File: Analyzer.java
/** Read from some reader and insert into document */
static void read(BaseDocument doc, Reader reader, int pos)
throws BadLocationException, IOException {
int readBufferSize = ((Integer)doc.getProperty(EditorPreferencesKeys.READ_BUFFER_SIZE)).intValue();
LineSeparatorConversion.ToLineFeed toLF
= new LineSeparatorConversion.ToLineFeed(reader, readBufferSize);
Segment text = toLF.nextConverted();
while (text != null) {
doc.insertString(pos, new String(text.array, text.offset, text.count), null);
pos += text.count;
text = toLF.nextConverted();
}
}
Project: incubator-netbeans
File: OutputView.java
private int drawText(Graphics g,
int x, int y,
int startOffset, int endOffset,
boolean error,
boolean selected,
DocElement docElem) throws BadLocationException {
Segment s = EventQueue.isDispatchThread() ? SEGMENT : new Segment();
s.array = docElem.getChars();
s.offset = startOffset - docElem.offset;
s.count = endOffset - startOffset;
g.setColor(getColor(error, selected));
return Utilities.drawTabbedText(s, x, y, g, this, startOffset);
}
Project: rapidminer
File: SQLTokenMarker.java
private void searchBack(Segment line, int pos, boolean padNull) {
int len = pos - lastKeyword;
byte id = keywords.lookup(line, lastKeyword, len);
if (id != Token.NULL) {
if (lastKeyword != lastOffset) {
addToken(lastKeyword - lastOffset, Token.NULL);
}
addToken(len, id);
lastOffset = pos;
}
lastKeyword = pos + 1;
if (padNull && lastOffset < pos) {
addToken(pos - lastOffset, Token.NULL);
}
}
Project: rapidminer
File: SyntaxUtilities.java
/**
* Paints the specified line onto the graphics context. Note that this method munges the offset
* and count values of the segment.
*
* @param line
* The line segment
* @param tokens
* The token list for the line
* @param styles
* The syntax style list
* @param expander
* The tab expander used to determine tab stops. May be null
* @param gfx
* The graphics context
* @param x
* The x co-ordinate
* @param y
* The y co-ordinate
* @return The x co-ordinate, plus the width of the painted string
*/
public static int paintSyntaxLine(Segment line, Token tokens, SyntaxStyle[] styles, TabExpander expander, Graphics gfx,
int x, int y) {
Font defaultFont = gfx.getFont();
Color defaultColor = gfx.getColor();
int offset = 0;
for (;;) {
byte id = tokens.id;
if (id == Token.END) {
break;
}
int length = tokens.length;
if (id == Token.NULL) {
if (!defaultColor.equals(gfx.getColor())) {
gfx.setColor(defaultColor);
}
if (!defaultFont.equals(gfx.getFont())) {
gfx.setFont(defaultFont);
}
} else {
styles[id].setGraphicsFlags(gfx, defaultFont);
}
line.count = length;
x = Utilities.drawTabbedText(line, x, y, gfx, expander, 0);
line.offset += length;
offset += length;
tokens = tokens.next;
}
return x;
}
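The offset/count "munging" the javadoc warns about is simply the Segment window being narrowed to one token and slid along its own backing array between drawTabbedText calls. Here is a tiny stand-alone illustration of that idiom, with token lengths hard-coded for the example:

import javax.swing.text.Segment;

public class SegmentWindowDemo {
    public static void main(String[] args) {
        char[] chars = "if (x) return;".toCharArray();
        Segment line = new Segment(chars, 0, chars.length);

        int[] tokenLengths = {2, 1, 1, 1, 1, 1, 6, 1}; // "if", " ", "(", "x", ")", " ", "return", ";"
        for (int len : tokenLengths) {
            line.count = len;       // narrow the window to the current token
            System.out.println("'" + line + "'");
            line.offset += len;     // slide past it
        }
    }
}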
Project: powertext
File: TokenMakerBase.java
/**
* {@inheritDoc}
*/
@Override
public int getLastTokenTypeOnLine(Segment text, int initialTokenType) {
// Last parameter doesn't matter if we're not painting.
Token t = getTokenList(text, initialTokenType, 0);
while (t.getNextToken()!=null) {
t = t.getNextToken();
}
return t.getType();
}
Project: rapidminer
File: PHPTokenMarker.java
private boolean doKeyword(Segment line, int i, char c) {
int i1 = i + 1;
int len = i - lastKeyword;
byte id = keywords.lookup(line, lastKeyword, len);
if (id != Token.NULL) {
if (lastKeyword != lastOffset) {
addToken(lastKeyword - lastOffset, Token.KEYWORD3);
}
addToken(len, id);
lastOffset = i;
}
lastKeyword = i1;
return false;
}
Project: rapidminer
File: CTokenMarker.java
private boolean doKeyword(Segment line, int i, char c) {
int i1 = i + 1;
int len = i - lastKeyword;
byte id = keywords.lookup(line, lastKeyword, len);
if (id != Token.NULL) {
if (lastKeyword != lastOffset) {
addToken(lastKeyword - lastOffset, Token.NULL);
}
addToken(len, id);
lastOffset = i;
}
lastKeyword = i1;
return false;
}
Project: rapidminer
File: HTMLTokenMarker.java
private boolean doKeyword(Segment line, int i, char c) {
int i1 = i + 1;
int len = i - lastKeyword;
byte id = keywords.lookup(line, lastKeyword, len);
if (id != Token.NULL) {
if (lastKeyword != lastOffset) {
addToken(lastKeyword - lastOffset, Token.NULL);
}
addToken(len, id);
lastOffset = i;
}
lastKeyword = i1;
return false;
}
Project: JavaGraph
File: PrologTokenMaker.java
/**
* Returns the first token in the linked list of tokens generated
* from <code>text</code>. This method must be implemented by
* subclasses so they can correctly implement syntax highlighting.
*
* @param text The text from which to get tokens.
* @param initialTokenType The token type we should start with.
* @param startOffset The offset into the document at which
* <code>text</code> starts.
* @return The first <code>Token</code> in a linked list representing
* the syntax highlighted text.
*/
@Override
public Token getTokenList(Segment text, int initialTokenType, int startOffset) {
resetTokenList();
this.offsetShift = -text.offset + startOffset;
// Start off in the proper state.
int state = Token.NULL;
switch (initialTokenType) {
case Token.COMMENT_MULTILINE:
state = MLC;
this.start = text.offset;
break;
/* No documentation comments */
default:
state = Token.NULL;
}
this.s = text;
try {
yyreset(this.zzReader);
yybegin(state);
return yylex();
} catch (IOException ioe) {
// ignore
return new DefaultToken();
}
}
Project: JavaGraph
File: CtrlTokenMaker.java
/**
* Checks the token to give it the exact ID it deserves before being passed
* up to the super method.
*
* @param segment <code>Segment</code> to get text from.
* @param start Start offset in <code>segment</code> of token.
* @param end End offset in <code>segment</code> of token.
* @param tokenType The token's type.
* @param startOffset The offset in the document at which the token occurs.
*/
@Override
public void addToken(Segment segment, int start, int end, int tokenType, int startOffset) {
switch (tokenType) {
// Since reserved words, functions, and data types are all passed
// into here as "identifiers," we have to see what the token
// really is...
case Token.IDENTIFIER:
int value = this.wordsToHighlight.get(segment, start, end);
if (value != -1) {
tokenType = value;
}
break;
case Token.WHITESPACE:
case Token.SEPARATOR:
case Token.OPERATOR:
case Token.ERROR_IDENTIFIER:
case Token.ERROR_NUMBER_FORMAT:
case Token.ERROR_STRING_DOUBLE:
case Token.ERROR_CHAR:
case Token.COMMENT_EOL:
case Token.COMMENT_MULTILINE:
case Token.LITERAL_BOOLEAN:
case Token.LITERAL_NUMBER_DECIMAL_INT:
case Token.LITERAL_NUMBER_FLOAT:
case Token.LITERAL_NUMBER_HEXADECIMAL:
case Token.LITERAL_STRING_DOUBLE_QUOTE:
case Token.LITERAL_CHAR:
break;
default:
throw new IllegalArgumentException("Unknown tokenType: '" + tokenType + "'");
}
super.addToken(segment, start, end, tokenType, startOffset);
}
Project: powertext
File: DocumentWordTokenizer.java
/** This helper method will return the start character of the next
* word in the buffer from the start position
*/
private static int getNextWordStart(Segment text, int startPos) {
if (startPos <= text.getEndIndex())
for (char ch = text.setIndex(startPos); ch != Segment.DONE; ch = text.next()) {
if (Character.isLetterOrDigit(ch)) {
return text.getIndex();
}
}
return -1;
}
Project: powertext
File: DocumentWordTokenizer.java
/** This helper method will return the end of the next word in the buffer.
*
*/
private static int getNextWordEnd(Segment text, int startPos) {
for (char ch = text.setIndex(startPos); ch != Segment.DONE; ch = text.next()) {
if (!Character.isLetterOrDigit(ch)) {
if (ch == '-' || ch == '\'') { // handle ' and - inside words
char ch2 = text.next();
text.previous();
if (ch2 != Segment.DONE && Character.isLetterOrDigit(ch2))
continue;
}
return text.getIndex();
}
}
return text.getEndIndex();
}
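The next()/previous() pair above is a one-character lookahead: peek at the character after a '-' or '\'' and then step back so iteration resumes where it left off. A minimal stand-alone sketch of that trick (input string is illustrative):

import javax.swing.text.Segment;

public class ApostropheLookaheadDemo {
    public static void main(String[] args) {
        char[] chars = "don't stop".toCharArray();
        Segment text = new Segment(chars, 0, chars.length);

        char ch = text.setIndex(3);   // the apostrophe in "don't"
        char ch2 = text.next();       // peek at the following character: 't'
        text.previous();              // step back so the iterator position is unchanged
        System.out.println(ch + " is followed by a letter or digit: "
                + (ch2 != Segment.DONE && Character.isLetterOrDigit(ch2)));
    }
}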
Project: Yass
File: DocumentWordTokenizer.java
/**
* This helper method returns the start of the next word in the buffer,
* scanning from the given start position.
*
* @param text the segment to scan
* @param startPos the position at which to start scanning
* @return the index at which the next word starts, or -1 if no word follows
*/
private static int getNextWordStart(Segment text, int startPos) {
if (startPos <= text.getEndIndex())
for (char ch = text.setIndex(startPos); ch != Segment.DONE; ch = text.next()) {
// changed by Saruta
if (Character.isLetterOrDigit(ch) || ch == '-' || ch == '\'' || ch == '~') {
return text.getIndex();
}
}
return -1;
}
Project: powertext
File: RPrintUtilities.java
/**
* Removes any spaces or tabs from the end of the segment.
*
* @param segment The segment from which to remove trailing whitespace.
* @return <code>segment</code> with trailing whitespace removed.
*/
private static Segment removeEndingWhitespace(Segment segment) {
int toTrim = 0;
char currentChar = segment.setIndex(segment.getEndIndex()-1);
while ((currentChar==' ' || currentChar=='\t') && currentChar!=Segment.DONE) {
toTrim++;
currentChar = segment.previous();
}
String stringVal = segment.toString();
String newStringVal = stringVal.substring(0,stringVal.length()-toTrim);
return new Segment(newStringVal.toCharArray(), 0, newStringVal.length());
}
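The same trailing-whitespace count can be reproduced outside the class with nothing but Segment's own CharacterIterator methods; a minimal stand-alone sketch (input string is illustrative):

import javax.swing.text.Segment;

public class TrailingWhitespaceDemo {
    public static void main(String[] args) {
        char[] chars = "printMe();  \t ".toCharArray();
        Segment seg = new Segment(chars, 0, chars.length);

        int toTrim = 0;
        char c = seg.setIndex(seg.getEndIndex() - 1); // start at the last character
        while (c != Segment.DONE && (c == ' ' || c == '\t')) {
            toTrim++;
            c = seg.previous();
        }
        String trimmed = seg.toString().substring(0, seg.count - toTrim);
        System.out.println("[" + trimmed + "]"); // [printMe();]
    }
}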
Project: powertext
File: ClojureTokenMaker.java
/**
* Returns the first token in the linked list of tokens generated
* from <code>text</code>. This method must be implemented by
* subclasses so they can correctly implement syntax highlighting.
*
* @param text The text from which to get tokens.
* @param initialTokenType The token type we should start with.
* @param startOffset The offset into the document at which
* <code>text</code> starts.
* @return The first <code>Token</code> in a linked list representing
* the syntax highlighted text.
*/
@Override
public Token getTokenList(Segment text, int initialTokenType, int startOffset) {
resetTokenList();
this.offsetShift = -text.offset + startOffset;
// Start off in the proper state.
int state = Token.NULL;
switch (initialTokenType) {
/*case Token.COMMENT_MULTILINE:
state = MLC;
start = text.offset;
break;
case Token.COMMENT_DOCUMENTATION:
state = DOCCOMMENT;
start = text.offset;
break;*/
case Token.LITERAL_STRING_DOUBLE_QUOTE:
state = STRING;
start = text.offset;
break;
default:
state = Token.NULL;
}
s = text;
try {
yyreset(zzReader);
yybegin(state);
return yylex();
} catch (IOException ioe) {
ioe.printStackTrace();
return new TokenImpl();
}
}
Project: powertext
File: CTokenMaker.java
/**
* Returns the first token in the linked list of tokens generated
* from <code>text</code>. This method must be implemented by
* subclasses so they can correctly implement syntax highlighting.
*
* @param text The text from which to get tokens.
* @param initialTokenType The token type we should start with.
* @param startOffset The offset into the document at which
* <code>text</code> starts.
* @return The first <code>Token</code> in a linked list representing
* the syntax highlighted text.
*/
@Override
public Token getTokenList(Segment text, int initialTokenType, int startOffset) {
resetTokenList();
this.offsetShift = -text.offset + startOffset;
// Start off in the proper state.
int state = Token.NULL;
switch (initialTokenType) {
case Token.COMMENT_MULTILINE:
state = MLC;
start = text.offset;
break;
default:
state = Token.NULL;
}
s = text;
try {
yyreset(zzReader);
yybegin(state);
return yylex();
} catch (IOException ioe) {
ioe.printStackTrace();
return new TokenImpl();
}
}
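Below is a hedged sketch of how a TokenMaker like the one above is typically driven over a single line of text. It assumes the RSyntaxTextArea library (org.fife.ui.rsyntaxtextarea) is on the classpath; exact class and method names may differ between library versions.

import javax.swing.text.Segment;
import org.fife.ui.rsyntaxtextarea.Token;
import org.fife.ui.rsyntaxtextarea.TokenTypes;
import org.fife.ui.rsyntaxtextarea.modes.CTokenMaker;

public class CTokenMakerDemo {
    public static void main(String[] args) {
        char[] line = "int x = 42; /* answer */".toCharArray();
        Segment text = new Segment(line, 0, line.length);

        CTokenMaker tokenMaker = new CTokenMaker();
        Token t = tokenMaker.getTokenList(text, TokenTypes.NULL, 0);
        while (t != null && t.isPaintable()) {
            System.out.println(t.getType() + ": '" + t.getLexeme() + "'");
            t = t.getNextToken();
        }
    }
}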
Project: powertext
File: ScalaTokenMaker.java
/**
* Returns the first token in the linked list of tokens generated
* from <code>text</code>. This method must be implemented by
* subclasses so they can correctly implement syntax highlighting.
*
* @param text The text from which to get tokens.
* @param initialTokenType The token type we should start with.
* @param startOffset The offset into the document at which
* <code>text</code> starts.
* @return The first <code>Token</code> in a linked list representing
* the syntax highlighted text.
*/
@Override
public Token getTokenList(Segment text, int initialTokenType, int startOffset) {
resetTokenList();
this.offsetShift = -text.offset + startOffset;
// Start off in the proper state.
int state = YYINITIAL;
switch (initialTokenType) {
case Token.LITERAL_STRING_DOUBLE_QUOTE:
state = MULTILINE_STRING_DOUBLE;
break;
case Token.COMMENT_MULTILINE:
state = MLC;
break;
default:
state = YYINITIAL;
}
s = text;
start = text.offset;
try {
yyreset(zzReader);
yybegin(state);
return yylex();
} catch (IOException ioe) {
ioe.printStackTrace();
return new TokenImpl();
}
}
Project: powertext
File: NSISTokenMaker.java
/**
* Returns the first token in the linked list of tokens generated
* from <code>text</code>. This method must be implemented by
* subclasses so they can correctly implement syntax highlighting.
*
* @param text The text from which to get tokens.
* @param initialTokenType The token type we should start with.
* @param startOffset The offset into the document at which
* <code>text</code> starts.
* @return The first <code>Token</code> in a linked list representing
* the syntax highlighted text.
*/
@Override
public Token getTokenList(Segment text, int initialTokenType, int startOffset) {
resetTokenList();
this.offsetShift = -text.offset + startOffset;
// Start off in the proper state.
int state = YYINITIAL;
switch (initialTokenType) {
case Token.LITERAL_STRING_DOUBLE_QUOTE:
state = STRING;
break;
case Token.LITERAL_CHAR:
state = CHAR_LITERAL;
break;
case Token.LITERAL_BACKQUOTE:
state = BACKTICKS;
break;
case Token.COMMENT_MULTILINE:
state = MLC;
break;
}
start = text.offset;
s = text;
try {
yyreset(zzReader);
yybegin(state);
return yylex();
} catch (IOException ioe) {
ioe.printStackTrace();
return new TokenImpl();
}
}
Project: powertext
File: UnixShellTokenMaker.java
/**
* Checks the token to give it the exact ID it deserves before
* being passed up to the super method.
*
* @param segment <code>Segment</code> to get text from.
* @param start Start offset in <code>segment</code> of token.
* @param end End offset in <code>segment</code> of token.
* @param tokenType The token's type.
* @param startOffset The offset in the document at which the token occurs.
*/
@Override
public void addToken(Segment segment, int start, int end, int tokenType, int startOffset) {
switch (tokenType) {
// Since reserved words, functions, and data types are all passed into here
// as "identifiers," we have to see what the token really is...
case Token.IDENTIFIER:
int value = wordsToHighlight.get(segment, start,end);
if (value!=-1)
tokenType = value;
break;
case Token.WHITESPACE:
case Token.SEPARATOR:
case Token.OPERATOR:
case Token.LITERAL_NUMBER_DECIMAL_INT:
case Token.LITERAL_STRING_DOUBLE_QUOTE:
case Token.LITERAL_CHAR:
case Token.LITERAL_BACKQUOTE:
case Token.COMMENT_EOL:
case Token.PREPROCESSOR:
case Token.VARIABLE:
break;
default:
tokenType = Token.IDENTIFIER;
break;
}
super.addToken(segment, start, end, tokenType, startOffset);
}
Project: powertext
File: ActionScriptTokenMaker.java
/**
* Returns the first token in the linked list of tokens generated
* from <code>text</code>. This method must be implemented by
* subclasses so they can correctly implement syntax highlighting.
*
* @param text The text from which to get tokens.
* @param initialTokenType The token type we should start with.
* @param startOffset The offset into the document at which
* <code>text</code> starts.
* @return The first <code>Token</code> in a linked list representing
* the syntax highlighted text.
*/
@Override
public Token getTokenList(Segment text, int initialTokenType, int startOffset) {
resetTokenList();
this.offsetShift = -text.offset + startOffset;
// Start off in the proper state.
int state = Token.NULL;
switch (initialTokenType) {
case Token.COMMENT_MULTILINE:
state = MLC;
start = text.offset;
break;
default:
state = Token.NULL;
}
s = text;
try {
yyreset(zzReader);
yybegin(state);
return yylex();
} catch (IOException ioe) {
ioe.printStackTrace();
return new TokenImpl();
}
}