一尘不染

Characters in the string are changed after downloading HTML from the internet

c#

Using the following code, I can download a file's HTML from the internet:

WebClient wc = new WebClient();

// ....

string downloadedFile = wc.DownloadString("http://www.myurl.com/");

However, sometimes the file contains "funny" characters: for example é comes back as Ã©, and フシギダネ comes back as フシギダãƒ.

I think this may be related to different Unicode encodings, or to each character being turned into 2 new characters (perhaps each character is being split in half), but I know very little about this area. What do you think is going wrong?
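For illustration, the "each character becomes 2 new characters" guess matches what happens when UTF-8 bytes are decoded with a single-byte encoding such as ISO-8859-1. A minimal sketch of that effect (a hypothetical demo, not code from the question):

using System;
using System.Text;

class MojibakeDemo
{
    static void Main()
    {
        // "é" is two bytes in UTF-8: 0xC3 0xA9.
        byte[] utf8Bytes = Encoding.UTF8.GetBytes("é");

        // Decoding those bytes as ISO-8859-1 maps each byte to its own
        // character, so the single "é" comes out as the two characters "Ã©".
        string garbled = Encoding.GetEncoding("ISO-8859-1").GetString(utf8Bytes);
        string correct = Encoding.UTF8.GetString(utf8Bytes);

        Console.WriteLine(garbled); // Ã©
        Console.WriteLine(correct); // é
    }
}

Every non-ASCII character doubles in the same way, which is consistent with the symptom described above.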


2020-05-19

1 Answer

一尘不染

Here is a ready-to-use download class that supports gzip and checks both the encoding header and the meta tag so that the page is decoded correctly.

Instantiate the class, then call GetPage():

using System;
using System.IO;
using System.IO.Compression;
using System.Net;
using System.Text;
using System.Text.RegularExpressions;

public class HttpDownloader
{
    private readonly string _referer;
    private readonly string _userAgent;

    public Encoding Encoding { get; set; }
    public WebHeaderCollection Headers { get; set; }
    public Uri Url { get; set; }

    public HttpDownloader(string url, string referer, string userAgent)
    {
        Encoding = Encoding.GetEncoding("ISO-8859-1"); // default; replaced once the real charset is known
        Url = new Uri(url); // verify the uri
        _userAgent = userAgent;
        _referer = referer;
    }

    // Downloads the page: sends the request, decompresses gzip/deflate if
    // necessary and decodes the body using the detected character set.
    public string GetPage()
    {
        HttpWebRequest request = (HttpWebRequest)WebRequest.Create(Url);
        if (!string.IsNullOrEmpty(_referer))
            request.Referer = _referer;
        if (!string.IsNullOrEmpty(_userAgent))
            request.UserAgent = _userAgent;

        request.Headers.Add(HttpRequestHeader.AcceptEncoding, "gzip,deflate");

        using (HttpWebResponse response = (HttpWebResponse)request.GetResponse())
        {
            Headers = response.Headers;
            Url = response.ResponseUri;
            return ProcessContent(response);
        }

    }

    // Reads the response body, decompressing it if needed, and decodes it.
    private string ProcessContent(HttpWebResponse response)
    {
        SetEncodingFromHeader(response);

        Stream s = response.GetResponseStream();
        if (response.ContentEncoding.ToLower().Contains("gzip"))
            s = new GZipStream(s, CompressionMode.Decompress);
        else if (response.ContentEncoding.ToLower().Contains("deflate"))
            s = new DeflateStream(s, CompressionMode.Decompress);

        // Buffer the whole body so it can be re-read with a different
        // encoding if the meta tag disagrees with the response headers.
        MemoryStream memStream = new MemoryStream();
        int bytesRead;
        byte[] buffer = new byte[0x1000];
        for (bytesRead = s.Read(buffer, 0, buffer.Length); bytesRead > 0; bytesRead = s.Read(buffer, 0, buffer.Length))
        {
            memStream.Write(buffer, 0, bytesRead);
        }
        s.Close();
        string html;
        memStream.Position = 0;
        using (StreamReader r = new StreamReader(memStream, Encoding))
        {
            html = r.ReadToEnd().Trim();
            html = CheckMetaCharSetAndReEncode(memStream, html);
        }

        return html;
    }

    // Picks up the charset declared in the Content-Type response header.
    private void SetEncodingFromHeader(HttpWebResponse response)
    {
        string charset = null;
        if (string.IsNullOrEmpty(response.CharacterSet))
        {
            Match m = Regex.Match(response.ContentType, @";\s*charset\s*=\s*(?<charset>.*)", RegexOptions.IgnoreCase);
            if (m.Success)
            {
                charset = m.Groups["charset"].Value.Trim(new[] { '\'', '"' });
            }
        }
        else
        {
            charset = response.CharacterSet;
        }
        if (!string.IsNullOrEmpty(charset))
        {
            try
            {
                Encoding = Encoding.GetEncoding(charset);
            }
            catch (ArgumentException)
            {
            }
        }
    }

    // If the HTML itself declares a different charset in a meta tag,
    // re-decode the buffered bytes with that encoding.
    private string CheckMetaCharSetAndReEncode(Stream memStream, string html)
    {
        Match m = new Regex(@"<meta\s+.*?charset\s*=\s*""?(?<charset>[A-Za-z0-9_-]+)""?", RegexOptions.Singleline | RegexOptions.IgnoreCase).Match(html);            
        if (m.Success)
        {
            string charset = m.Groups["charset"].Value.ToLower() ?? "iso-8859-1";
            if ((charset == "unicode") || (charset == "utf-16"))
            {
                charset = "utf-8";
            }

            try
            {
                Encoding metaEncoding = Encoding.GetEncoding(charset);
                if (Encoding != metaEncoding)
                {
                    memStream.Position = 0L;
                    StreamReader recodeReader = new StreamReader(memStream, metaEncoding);
                    html = recodeReader.ReadToEnd().Trim();
                    recodeReader.Close();
                }
            }
            catch (ArgumentException)
            {
            }
        }

        return html;
    }
}
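
A minimal usage sketch (the user-agent value below is just a placeholder, not something the answer prescribes):

using System;

class Program
{
    static void Main()
    {
        // URL taken from the question; referer left empty; user agent is arbitrary.
        var downloader = new HttpDownloader("http://www.myurl.com/", null, "Mozilla/5.0 (example)");
        string html = downloader.GetPage();

        Console.WriteLine(downloader.Encoding.WebName); // encoding that was actually used to decode the page
        Console.WriteLine(html.Length);
    }
}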
2020-05-19