坏
坏蛋是我
Unregistered / Unconfirmed
GUEST, unregistered user!
我下载了lucene_cn.jar, 存放于C:/lucene/lucene_cn.jar,
AUTOEXEC.BAT设置:
set path=c:/j2sdk1.4.0-rc/bin;%path%
set classpath=.;c:/j2sdk1.4.0-rc/lib/tools.jar;.;C:/JavaCC/javacc2.1/bin;C:/lucene/lucene_cn.jar;
set ANT_HOME=c:/ant
set JAVA_HOME=c:/j2sdk1.4.0-rc
set PATH=%PATH%;%ANT_HOME%/bin
然后在我的代码里面
import org.apache.lucene.analysis.cn.*;
javac时提示:
javac LuceneIndexExample.java
LuceneIndexExample.java:9: package org.apache.lucene.analysis.cn does not exist
import org.apache.lucene.analysis.cn.*;
为什么?
然后我去掉import org.apache.lucene.analysis.cn.*;
直接把代码拷贝进去,如下:
class ChineseFilter extends TokenFilter
{
// Only English now, Chinese to be added later.
public static final String[] STOP_WORDS = {
"and", "are", "as", "at", "be", "but", "by",
"for", "if", "in", "into", "is", "it",
"no", "not", "of", "on", "or", "such",
"that", "the", "their", "then
", "there", "these",
"they", "this", "to", "was", "will", "with"
};
private Hashtable stopTable;
public ChineseFilter(TokenStream in) {
input = in;
stopTable = new Hashtable(STOP_WORDS.length);
for (int i = 0;
i < STOP_WORDS.length;
i++)
stopTable.put(STOP_WORDS, STOP_WORDS);
}
public final Token next() throws java.io.IOException {
for (Token token = input.next();
token != null;
token = input.next()) {
String text = token.termText();
if (stopTable.get(text) == null) {
switch (Character.getType(text.charAt(0))) {
case Character.LOWERCASE_LETTER:
case Character.UPPERCASE_LETTER:
// English word/token should larger than 1 character.
if (text.length()>1) {
return token;
}
break;
case Character.OTHER_LETTER:
// One Chinese character as one Chinese word.
// Chinese word extraction to be added later here.
return token;
}
}
}
return null;
}
}
// Tokenizer that emits maximal runs of ASCII-style letters/digits as single
// lowercased tokens, and each OTHER_LETTER character (e.g. a Chinese
// character) as its own one-character token. All other characters act as
// separators. Reads the underlying Reader through a fixed-size ioBuffer.
class ChineseTokenizer extends Tokenizer
{
public ChineseTokenizer(Reader in) {
input = in;
}
// offset: absolute character position in the stream (incremented before each
// read); bufferIndex/dataLen: read cursor and fill length of ioBuffer.
private int offset = 0, bufferIndex=0, dataLen=0;
// Maximum token length; a longer run is split into MAX_WORD_LEN-sized tokens.
private final static int MAX_WORD_LEN = 255;
private final static int IO_BUFFER_SIZE = 1024;
// buffer: characters of the token currently being built (length chars used).
private final char[] buffer = new char[MAX_WORD_LEN];
private final char[] ioBuffer = new char[IO_BUFFER_SIZE];
private int length;
// start: stream offset where the current token began.
private int start;
// Appends one lowercased character to the in-progress token; records the
// token's start offset on the first character.
private final void push(char c) {
if (length == 0) start = offset-1;
// start of token
buffer[length++] = Character.toLowerCase(c);
// buffer it
}
// Emits the buffered token as a Token with [start, start+length) offsets,
// or returns null if nothing is buffered.
private final Token flush() {
if (length>0) {
//System.out.println(new String(buffer, 0, length));
return new Token(new String(buffer, 0, length), start, start+length);
}
else
return null;
}
// Returns the next token, or null at end of stream (flush() is null when
// the buffer is empty at EOF).
public final Token next() throws java.io.IOException {
length = 0;
start = offset;
while (true) {
final char c;
offset++;
// Refill ioBuffer when exhausted. NOTE(review): a read() returning 0
// would loop on refills until data arrives; EOF is signalled by -1
// and checked just below, after bufferIndex was reset.
if (bufferIndex >= dataLen) {
dataLen = input.read(ioBuffer);
bufferIndex = 0;
};
if (dataLen == -1) return flush();
else
c = (char) ioBuffer[bufferIndex++];
switch(Character.getType(c)) {
case Character.DECIMAL_DIGIT_NUMBER:
case Character.LOWERCASE_LETTER:
case Character.UPPERCASE_LETTER:
// Letters/digits accumulate into one token, split at MAX_WORD_LEN.
push(c);
if (length == MAX_WORD_LEN) return flush();
break;
case Character.OTHER_LETTER:
// A Chinese character ends any pending ASCII token: push the
// character back (bufferIndex--) and emit the pending token first.
if (length>0) {
bufferIndex--;
return flush();
}
// Otherwise emit the single Chinese character as its own token.
push(c);
return flush();
default:
// Separator: emit any pending token, else keep scanning.
if (length>0) return flush();
break;
}
}
}
}
/**
 * Analyzer for mixed Chinese/English text: tokenizes with ChineseTokenizer
 * and then strips stop words and stray tokens with ChineseFilter.
 */
class ChineseAnalyzer extends Analyzer {

    public ChineseAnalyzer() {
    }

    /**
     * Creates a TokenStream which tokenizes all the text in the provided Reader.
     *
     * @return A TokenStream built from a ChineseTokenizer filtered with ChineseFilter.
     */
    public final TokenStream tokenStream(String fieldName, Reader reader) {
        return new ChineseFilter(new ChineseTokenizer(reader));
    }
}
运行正常,能检索西文,但是检索中文时提示
Query: 告
Searching for:
0 total matching documents
为什么?
我先去吃饭!各位帮帮忙!感激不尽!
AUTOEXEC.BAT设置:
set path=c:/j2sdk1.4.0-rc/bin;%path%
set classpath=.;c:/j2sdk1.4.0-rc/lib/tools.jar;.;C:/JavaCC/javacc2.1/bin;C:/lucene/lucene_cn.jar;
set ANT_HOME=c:/ant
set JAVA_HOME=c:/j2sdk1.4.0-rc
set PATH=%PATH%;%ANT_HOME%/bin
然后在我的代码里面
import org.apache.lucene.analysis.cn.*;
javac时提示:
javac LuceneIndexExample.java
LuceneIndexExample.java:9: package org.apache.lucene.analysis.cn does not exist
import org.apache.lucene.analysis.cn.*;
为什么?
然后我去掉import org.apache.lucene.analysis.cn.*;
直接把代码拷贝进去,如下:
class ChineseFilter extends TokenFilter
{
// Only English now, Chinese to be added later.
public static final String[] STOP_WORDS = {
"and", "are", "as", "at", "be", "but", "by",
"for", "if", "in", "into", "is", "it",
"no", "not", "of", "on", "or", "such",
"that", "the", "their", "then
", "there", "these",
"they", "this", "to", "was", "will", "with"
};
private Hashtable stopTable;
public ChineseFilter(TokenStream in) {
input = in;
stopTable = new Hashtable(STOP_WORDS.length);
for (int i = 0;
i < STOP_WORDS.length;
i++)
stopTable.put(STOP_WORDS, STOP_WORDS);
}
public final Token next() throws java.io.IOException {
for (Token token = input.next();
token != null;
token = input.next()) {
String text = token.termText();
if (stopTable.get(text) == null) {
switch (Character.getType(text.charAt(0))) {
case Character.LOWERCASE_LETTER:
case Character.UPPERCASE_LETTER:
// English word/token should larger than 1 character.
if (text.length()>1) {
return token;
}
break;
case Character.OTHER_LETTER:
// One Chinese character as one Chinese word.
// Chinese word extraction to be added later here.
return token;
}
}
}
return null;
}
}
// Tokenizer that emits maximal runs of ASCII-style letters/digits as single
// lowercased tokens, and each OTHER_LETTER character (e.g. a Chinese
// character) as its own one-character token. All other characters act as
// separators. Reads the underlying Reader through a fixed-size ioBuffer.
class ChineseTokenizer extends Tokenizer
{
public ChineseTokenizer(Reader in) {
input = in;
}
// offset: absolute character position in the stream (incremented before each
// read); bufferIndex/dataLen: read cursor and fill length of ioBuffer.
private int offset = 0, bufferIndex=0, dataLen=0;
// Maximum token length; a longer run is split into MAX_WORD_LEN-sized tokens.
private final static int MAX_WORD_LEN = 255;
private final static int IO_BUFFER_SIZE = 1024;
// buffer: characters of the token currently being built (length chars used).
private final char[] buffer = new char[MAX_WORD_LEN];
private final char[] ioBuffer = new char[IO_BUFFER_SIZE];
private int length;
// start: stream offset where the current token began.
private int start;
// Appends one lowercased character to the in-progress token; records the
// token's start offset on the first character.
private final void push(char c) {
if (length == 0) start = offset-1;
// start of token
buffer[length++] = Character.toLowerCase(c);
// buffer it
}
// Emits the buffered token as a Token with [start, start+length) offsets,
// or returns null if nothing is buffered.
private final Token flush() {
if (length>0) {
//System.out.println(new String(buffer, 0, length));
return new Token(new String(buffer, 0, length), start, start+length);
}
else
return null;
}
// Returns the next token, or null at end of stream (flush() is null when
// the buffer is empty at EOF).
public final Token next() throws java.io.IOException {
length = 0;
start = offset;
while (true) {
final char c;
offset++;
// Refill ioBuffer when exhausted. NOTE(review): a read() returning 0
// would loop on refills until data arrives; EOF is signalled by -1
// and checked just below, after bufferIndex was reset.
if (bufferIndex >= dataLen) {
dataLen = input.read(ioBuffer);
bufferIndex = 0;
};
if (dataLen == -1) return flush();
else
c = (char) ioBuffer[bufferIndex++];
switch(Character.getType(c)) {
case Character.DECIMAL_DIGIT_NUMBER:
case Character.LOWERCASE_LETTER:
case Character.UPPERCASE_LETTER:
// Letters/digits accumulate into one token, split at MAX_WORD_LEN.
push(c);
if (length == MAX_WORD_LEN) return flush();
break;
case Character.OTHER_LETTER:
// A Chinese character ends any pending ASCII token: push the
// character back (bufferIndex--) and emit the pending token first.
if (length>0) {
bufferIndex--;
return flush();
}
// Otherwise emit the single Chinese character as its own token.
push(c);
return flush();
default:
// Separator: emit any pending token, else keep scanning.
if (length>0) return flush();
break;
}
}
}
}
/**
 * Analyzer for mixed Chinese/English text: tokenizes with ChineseTokenizer
 * and then strips stop words and stray tokens with ChineseFilter.
 */
class ChineseAnalyzer extends Analyzer {

    public ChineseAnalyzer() {
    }

    /**
     * Creates a TokenStream which tokenizes all the text in the provided Reader.
     *
     * @return A TokenStream built from a ChineseTokenizer filtered with ChineseFilter.
     */
    public final TokenStream tokenStream(String fieldName, Reader reader) {
        return new ChineseFilter(new ChineseTokenizer(reader));
    }
}
运行正常,能检索西文,但是检索中文时提示
Query: 告
Searching for:
0 total matching documents
为什么?
我先去吃饭!各位帮帮忙!感激不尽!