View Javadoc

1   /* Copyright (C) 2003 Internet Archive.
2    *
3    * This file is part of the Heritrix web crawler (crawler.archive.org).
4    *
5    * Heritrix is free software; you can redistribute it and/or modify
6    * it under the terms of the GNU Lesser Public License as published by
7    * the Free Software Foundation; either version 2.1 of the License, or
8    * any later version.
9    *
10   * Heritrix is distributed in the hope that it will be useful,
11   * but WITHOUT ANY WARRANTY; without even the implied warranty of
12   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13   * GNU Lesser Public License for more details.
14   *
15   * You should have received a copy of the GNU Lesser Public License
16   * along with Heritrix; if not, write to the Free Software
17   * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
18   *
19   * Created on Nov 17, 2003
20   *
21   * To change the template for this generated file go to
22   * Window>Preferences>Java>Code Generation>Code and Comments
23   */
24  package org.archive.crawler.extractor;
25  
26  import java.io.IOException;
27  import java.util.logging.Level;
28  import java.util.logging.Logger;
29  import java.util.regex.Matcher;
30  
31  import org.apache.commons.codec.DecoderException;
32  import org.apache.commons.httpclient.URIException;
33  import org.archive.crawler.datamodel.CoreAttributeConstants;
34  import org.archive.crawler.datamodel.CrawlURI;
35  import org.archive.crawler.framework.CrawlController;
36  import org.archive.io.ReplayCharSequence;
37  import org.archive.net.LaxURLCodec;
38  import org.archive.net.UURI;
39  import org.archive.util.ArchiveUtils;
40  import org.archive.util.DevUtils;
41  import org.archive.util.TextUtils;
42  
/**
 * Processes Javascript files for strings that are likely to be
 * crawlable URIs.
 *
 * @contributor gojomo
 * @contributor szznax
 *
 */
51  public class ExtractorJS extends Extractor implements CoreAttributeConstants {
52  
53      private static final long serialVersionUID = -2231962381454717720L;
54  
55      private static Logger LOGGER =
56          Logger.getLogger("org.archive.crawler.extractor.ExtractorJS");
57  
58      static final String AMP = "&";
59      static final String ESCAPED_AMP = "&";
60      static final String WHITESPACE = "//s";
61  
62      // finds whitespace-free strings in Javascript
63      // (areas between paired ' or " characters, possibly backslash-quoted
64      // on the ends, but not in the middle)
65      static final String JAVASCRIPT_STRING_EXTRACTOR =
66          "(////{0,8}+(?:\"|\'))(//S{0,"+UURI.MAX_URL_LENGTH+"}?)(?://1)";
67      // GROUPS:
68      // (G1) ' or " with optional leading backslashes
69      // (G2) whitespace-free string delimited on boths ends by G1
70  
71      // determines whether a string is likely URI
72      // (no whitespace or '<' '>',  has an internal dot or some slash,
73      // begins and ends with either '/' or a word-char)
74      static final String STRING_URI_DETECTOR =
75          "(?://w|[//.]{0,2}/)[//S&&[^<>]]*(?://.|/)[//S&&[^<>]]*(?://w|/)";
76  
77      protected long numberOfCURIsHandled = 0;
78      protected static long numberOfLinksExtracted = 0;
79  
80      // strings that STRING_URI_DETECTOR picks up as URIs,
81      // which are known to be problematic, and NOT to be 
82      // added to outLinks
83      protected final static String[] STRING_URI_DETECTOR_EXCEPTIONS = {
84          "text/javascript"
85          };
86      
87      // URIs known to produce false-positives with the current JS extractor.
88      // e.g. currently (2.0.3) the JS extractor produces 13 false-positive 
89      // URIs from http://www.google-analytics.com/urchin.js and only 2 
90      // good URIs, which are merely one pixel images.
91      // TODO: remove this blacklist when JS extractor is improved 
92      protected final static String[] EXTRACTOR_URI_EXCEPTIONS = {
93          "http://www.google-analytics.com/urchin.js"
94          };
95      
96      /***
97       * @param name
98       */
99      public ExtractorJS(String name) {
100         super(name, "JavaScript extractor. Link extraction on JavaScript" +
101                 " files (.js).");
102     }
103 
104     /* (non-Javadoc)
105      * @see org.archive.crawler.framework.Processor#process(org.archive.crawler.datamodel.CrawlURI)
106      */
107     public void extract(CrawlURI curi) {
108         // special-cases, for when we know our current JS extractor does poorly.
109         // TODO: remove this test when JS extractor is improved 
110         for (String s: EXTRACTOR_URI_EXCEPTIONS) {
111             if (curi.toString().equals(s))
112                 return;
113         }
114             
115         if (!isHttpTransactionContentToProcess(curi)) {
116             return;
117         }
118         String contentType = curi.getContentType();
119         if ((contentType == null)) {
120             return;
121         }
122         // If content type is not js and if the viaContext
123         // does not begin with 'script', return.
124         if((contentType.indexOf("javascript") < 0) &&
125             (contentType.indexOf("jscript") < 0) &&
126             (contentType.indexOf("ecmascript") < 0) &&
127             (!curi.toString().toLowerCase().endsWith(".js")) &&
128             (curi.getViaContext() == null || !curi.getViaContext().
129                 toString().toLowerCase().startsWith("script"))) {
130             return;
131         }
132 
133         this.numberOfCURIsHandled++;
134 
135         ReplayCharSequence cs = null;
136         try {
137             cs = curi.getHttpRecorder().getReplayCharSequence();
138         } catch (IOException e) {
139             curi.addLocalizedError(this.getName(), e,
140             	"Failed get of replay char sequence.");
141         }
142         if (cs == null) {
143             LOGGER.warning("Failed getting ReplayCharSequence: " +
144                 curi.toString());
145             return;
146         }
147 
148         try {
149             try {
150                 numberOfLinksExtracted += considerStrings(curi, cs,
151                         getController(), true);
152             } catch (StackOverflowError e) {
153                 DevUtils.warnHandle(e, "ExtractorJS StackOverflowError");
154             }
155             // Set flag to indicate that link extraction is completed.
156             curi.linkExtractorFinished();
157         } finally {
158             // Done w/ the ReplayCharSequence. Close it.
159             if (cs != null) {
160                 try {
161                     cs.close();
162                 } catch (IOException ioe) {
163                     LOGGER.warning(TextUtils.exceptionToString(
164                         "Failed close of ReplayCharSequence.", ioe));
165                 }
166             }
167         }
168     }
169 
170     public static long considerStrings(CrawlURI curi, CharSequence cs,
171             CrawlController controller, boolean handlingJSFile) {
172         long foundLinks = 0;
173         Matcher strings =
174             TextUtils.getMatcher(JAVASCRIPT_STRING_EXTRACTOR, cs);
175         while(strings.find()) {
176             CharSequence subsequence =
177                 cs.subSequence(strings.start(2), strings.end(2));
178             Matcher uri =
179                 TextUtils.getMatcher(STRING_URI_DETECTOR, subsequence);
180             if(uri.matches()) {
181                 String string = uri.group();
182                 // protect against adding outlinks for known problematic matches
183                 if (isUriMatchException(string,cs)) {
184                     TextUtils.recycleMatcher(uri);
185                     continue;
186                 }
187                 string = speculativeFixup(string, curi);
188                 foundLinks++;
189                 try {
190                     if (handlingJSFile) {
191                         curi.createAndAddLinkRelativeToVia(string,
192                             Link.JS_MISC, Link.SPECULATIVE_HOP);
193                     } else {
194                         curi.createAndAddLinkRelativeToBase(string,
195                             Link.JS_MISC, Link.SPECULATIVE_HOP);
196                     }
197                 } catch (URIException e) {
198                     // There may not be a controller (e.g. If we're being run
199                     // by the extractor tool).
200                     if (controller != null) {
201                         controller.logUriError(e, curi.getUURI(), string);
202                     } else {
203                         LOGGER.info(curi + ", " + string + ": " +
204                             e.getMessage());
205                     }
206                 }
207             } else {
208                foundLinks += considerStrings(curi, subsequence,
209                    controller, handlingJSFile);
210             }
211             TextUtils.recycleMatcher(uri);
212         }
213         TextUtils.recycleMatcher(strings);
214         return foundLinks;
215     }
216 
217     /***
218      * checks to see if URI match is a special case 
219      * @param string matched by <code>STRING_URI_DETECTOR</code>
220      * @param cs 
221      * @return true if string is one of <code>STRING_URI_EXCEPTIONS</code>
222      */
223     private static boolean isUriMatchException(String string,CharSequence cs) {
224         for (String s : STRING_URI_DETECTOR_EXCEPTIONS) {
225             if (s.equals(string)) 
226                 return true;
227         }
228         return false;
229     }
230 
231     /***
232      * Perform additional fixup of likely-URI Strings
233      * 
234      * @param string detected candidate String
235      * @return String changed/decoded to increase liklihood it is a 
236      * meaningful non-404 URI
237      */
238     public static String speculativeFixup(String string, CrawlURI curi) {
239         String retVal = string;
240         
241         // unescape ampersands
242         retVal = TextUtils.replaceAll(ESCAPED_AMP, retVal, AMP);
243         
244         // uri-decode if begins with encoded 'http(s)?%3A'
245         Matcher m = TextUtils.getMatcher("(?i)^https?%3A.*",retVal); 
246         if(m.matches()) {
247             try {
248                 retVal = LaxURLCodec.DEFAULT.decode(retVal);
249             } catch (DecoderException e) {
250                 LOGGER.log(Level.INFO,"unable to decode",e);
251             }
252         }
253         TextUtils.recycleMatcher(m);
254         
255         // TODO: more URI-decoding if there are %-encoded parts?
256         
257         // detect scheme-less intended-absolute-URI
258         // intent: "opens with what looks like a dotted-domain, and 
259         // last segment is a top-level-domain (eg "com", "org", etc)" 
260         m = TextUtils.getMatcher(
261                 "^[^//./://s%]+//.[^/://s%]+//.([^//./://s%]+)(/.*|)$", 
262                 retVal);
263         if(m.matches()) {
264             if(ArchiveUtils.isTld(m.group(1))) { 
265                 String schemePlus = "http://";       
266                 // if on exact same host preserve scheme (eg https)
267                 try {
268                     if (retVal.startsWith(curi.getUURI().getHost())) {
269                         schemePlus = curi.getUURI().getScheme() + "://";
270                     }
271                 } catch (URIException e) {
272                     // error retrieving source host - ignore it
273                 }
274                 retVal = schemePlus + retVal; 
275             }
276         }
277         TextUtils.recycleMatcher(m);
278         
279         return retVal; 
280     }
281 
282     /*
283      * (non-Javadoc)
284      * 
285      * @see org.archive.crawler.framework.Processor#report()
286      */
287     public String report() {
288         StringBuffer ret = new StringBuffer();
289         ret.append("Processor: org.archive.crawler.extractor.ExtractorJS\n");
290         ret.append("  Function:          Link extraction on JavaScript code\n");
291         ret.append("  CrawlURIs handled: " + numberOfCURIsHandled + "\n");
292         ret.append("  Links extracted:   " + numberOfLinksExtracted + "\n\n");
293 
294         return ret.toString();
295     }
296 }