/// <summary>
/// Downloads the group page at <paramref name="group_url"/>, scrapes each row of the
/// <c>&lt;table class="olt"&gt;</c> listing, and appends one <see cref="CrawlerResult"/>
/// per row (title, absolute URL, author) to <paramref name="arrayList"/>.
/// Rows whose title/link cannot be extracted, or whose URL does not contain
/// "http://", are silently skipped.
/// </summary>
/// <param name="task_ID">Crawler task identifier stamped onto every produced item.</param>
/// <param name="arrayList">Output list the scraped items are appended to (not cleared).</param>
/// <param name="group_url">URL of the group listing page to download (read as UTF-8).</param>
private void GetItemList(int task_ID, List<CrawlerResult> arrayList, string group_url)
{
    DownWebFile df = new DownWebFile();
    var _html = df.GetPageData(group_url, "utf-8");

    // Patterns for the listing table, its rows, and the cells we care about.
    Regex regExtractHtml = new Regex(@"<table class=""olt"">[\s\S]+?</table>", RegexOptions.None);
    Regex regLi = new Regex(@"<tr class="""">[\s\S]+?</tr>", RegexOptions.None);
    Regex regexTitle = new Regex(@"<td class=""title"">[\s\S]+?</td>", RegexOptions.None);
    Regex regexHref = new Regex(@"<a [^>]+?>[\s\S]+?</a>", RegexOptions.None);
    Regex regexAuthor = new Regex(@"<td Nowrap=""Nowrap"">[\s\S]+?</td>", RegexOptions.None);

    MatchCollection matchList = regExtractHtml.Matches(_html);
    if (matchList.Count < 1)
        return; // listing table not present on the page

    var _contentHtml = matchList[0].Value;
    matchList = regLi.Matches(_contentHtml);

    for (int i = 0; i < matchList.Count; i++)
    {
        var _text = matchList[i].Value;

        CrawlerResult item = new CrawlerResult();
        item.Task_ID = task_ID;

        // Title cell. BUGFIX: was `Count < 0`, which is always false since Count
        // is never negative — an empty match set then crashed on tempMatch[0].
        MatchCollection tempMatch = regexTitle.Matches(_text);
        if (tempMatch.Count < 1)
            continue;
        item.Title = CommonFunction.DeleteHTMLElement(tempMatch[0].Value);

        // Anchor inside the title cell. BUGFIX: same `< 0` guard corrected to `< 1`.
        tempMatch = regexHref.Matches(tempMatch[0].Value);
        if (tempMatch.Count < 1)
            continue;
        item.Url = GetURL(tempMatch[0].Value);

        // Ordinal comparison: URL scheme matching is not culture-sensitive (CA1310).
        // NOTE(review): this also drops https:// links — confirm whether that is intended.
        if (item.Url.IndexOf("http://", StringComparison.Ordinal) < 0)
            continue;

        // Author cell is optional; leave item.Author unset when absent.
        tempMatch = regexAuthor.Matches(_text);
        if (tempMatch.Count > 0)
        {
            item.Author = CommonFunction.DeleteHTMLElement(tempMatch[0].Value);
        }

        item.SiteName = "XX";
        item.FilterType = IWOMWebCrawlerDbLayer.Common.FilterType.FilterNo;
        arrayList.Add(item);
    }
}