##// END OF EJS Templates
py3: replace os.getenv with pycompat.osgetenv...
Pulkit Goyal -
r30664:69acfd2c default
parent child Browse files
Show More
@@ -1,664 +1,665 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import copy
12 import copy
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import platform
15 import platform
16 import stat
16 import stat
17
17
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19
19
20 from mercurial import (
20 from mercurial import (
21 dirstate,
21 dirstate,
22 error,
22 error,
23 httpconnection,
23 httpconnection,
24 match as matchmod,
24 match as matchmod,
25 node,
25 node,
26 pycompat,
26 pycompat,
27 scmutil,
27 scmutil,
28 util,
28 util,
29 )
29 )
30
30
# Name of the directory that holds largefile standins in the working copy.
shortname = '.hglf'
shortnameslash = shortname + '/'
# Name used both for the store directory and the config section.
longname = 'largefiles'
34
34
35 # -- Private worker functions ------------------------------------------
35 # -- Private worker functions ------------------------------------------
36
36
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum largefile size (in MB) as a float.

    The explicit command line value (opt) wins; otherwise, when
    assumelfiles is set, fall back to the configured
    largefiles.minsize (or the supplied default).  Abort when the
    value is not numeric, or when no value could be determined.
    '''
    minsize = opt
    if assumelfiles and not minsize:
        minsize = ui.config(longname, 'minsize', default=default)
    if minsize:
        try:
            minsize = float(minsize)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % minsize)
    if minsize is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return minsize
50
50
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # hardlink failed (e.g. cross-device or unsupported filesystem):
        # fall back on an atomic copy and preserve the source's mode bits
        with open(src, 'rb') as fin:
            with util.atomictempfile(dest) as fout:
                for block in util.filechunkiter(fin):
                    fout.write(block)
        os.chmod(dest, os.stat(src).st_mode)
63
63
def usercachepath(ui, hash):
    '''Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space.'''
    cachedir = _usercachedir(ui)
    return os.path.join(cachedir, hash)
70
70
def _usercachedir(ui):
    '''Return the location of the "global" largefiles cache.

    An explicit largefiles.usercache config wins; otherwise the
    platform's conventional per-user cache directory is used.  Aborts
    when no location can be determined.
    '''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        return path
    if pycompat.osname == 'nt':
        # prefer the per-machine local appdata, fall back to roaming
        # (redundant "\" line continuation removed: the expression is
        # already inside parentheses)
        appdata = pycompat.osgetenv('LOCALAPPDATA',
                                    pycompat.osgetenv('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname)
    elif platform.system() == 'Darwin':
        home = pycompat.osgetenv('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname)
    elif pycompat.osname == 'posix':
        # honor the XDG base-directory spec before falling back to ~/.cache
        path = pycompat.osgetenv('XDG_CACHE_HOME')
        if path:
            return os.path.join(path, longname)
        home = pycompat.osgetenv('HOME')
        if home:
            return os.path.join(home, '.cache', longname)
    else:
        raise error.Abort(_('unknown operating system: %s\n')
                          % pycompat.osname)
    raise error.Abort(_('unknown %s usercache location') % longname)
95
96
def inusercache(ui, hash):
    '''True when the per-user cache already holds the file with `hash`.'''
    return os.path.exists(usercachepath(ui, hash))
99
100
def findfile(repo, hash):
    '''Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        # populate the local store from the user cache as a side effect
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
114
115
class largefilesdirstate(dirstate.dirstate):
    '''dirstate subclass for tracking largefiles.

    Every incoming path is normalized to the slash-separated form used
    by the lfdirstate (via unixpath) before delegating to the base
    class.
    '''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never ignored
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
137
138
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    vfs = repo.vfs
    lfstoredir = longname
    opener = scmutil.opener(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if standins:
            vfs.makedirs(lfstoredir)

        for standinfile in standins:
            lfdirstate.normallookup(splitstandin(standinfile))
    return lfdirstate
163
164
def lfdirstatestatus(lfdirstate, repo):
    '''Resolve "unsure" largefiles against the parent context: verified
    files are marked clean (and normal'd in the lfdirstate), the rest
    are treated as modified.  Return the adjusted status object.'''
    wctx = repo['.']
    match = matchmod.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            fctx = None
        # a file is clean only when its standin exists and the recorded
        # hash matches the current on-disk contents
        if fctx and fctx.data().strip() == hashfile(repo.wjoin(lfile)):
            clean.append(lfile)
            lfdirstate.normal(lfile)
        else:
            modified.append(lfile)
    return s
180
181
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    ctx = repo[rev]
    # ignore unknown files in working directory
    return [splitstandin(f)
            for f in ctx.walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']
192
193
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    path = storepath(repo, hash, forcelocal)
    return os.path.exists(path)
196
197
def storepath(repo, hash, forcelocal=False):
    '''Return the correct location in the repository largefiles store for a
    file with the given hash.

    For shared repositories the store lives in the share source unless
    forcelocal is set.
    '''
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.join(longname, hash)
203
204
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is
    returned.  The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)
    if instore(repo, hash):
        return (path, True)
    if repo.shared() and instore(repo, hash, True):
        return (storepath(repo, hash, True), True)
    return (path, False)
220
221
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, 'rb') as srcfd:
        with wvfs(filename, 'wb') as destfd:
            gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        # verification failed: remove the corrupt copy and report failure
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        wvfs.unlink(filename)
        return False
    return True
244
245
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile standing behind `file` at `rev` into the store,
    unless it is already there.  Warn when the working copy is missing.'''
    wvfs = repo.wvfs
    hash = readstandin(repo, file, rev)
    if instore(repo, hash):
        return
    if not wvfs.exists(file):
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, hash))
        return
    copytostoreabsolute(repo, wvfs.join(file), hash)
255
256
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        # only standins that are actually part of the manifest matter
        if isstandin(filename) and filename in ctx.manifest():
            copytostore(repo, ctx.node(), splitstandin(filename))
264
265
def copytostoreabsolute(repo, file, hash):
    '''Copy the file at absolute path `file` into the store under `hash`,
    then mirror it into the per-user cache.'''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        dst = storepath(repo, hash)
        util.makedirs(os.path.dirname(dst))
        with open(file, 'rb') as srcf:
            with util.atomictempfile(dst,
                                     createmode=repo.store.createmode) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)
276
277
def linktousercache(repo, hash):
    '''Link / copy the largefile with the specified hash from the store
    to the cache.'''
    link(storepath(repo, hash), usercachepath(repo.ui, hash))
282
283
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    def badfn(f, msg):
        # no warnings about missing files or directories
        pass

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
        # if pats is empty, it would incorrectly always match, so clear _always
        match._always = False
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match
302
303
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    # renamed from "isstandin" to avoid shadowing the module-level helper
    origmatchfn = smatcher.matchfn

    def composedmatchfn(f):
        return origmatchfn(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn
    return smatcher
314
315
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
326
327
def isstandin(filename):
    '''Return true if filename is a big file standin.  filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortnameslash)
331
332
def splitstandin(filename):
    '''Return the largefile path for a standin path, or None when the
    path is not a standin.'''
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    return None
341
342
def updatestandin(repo, standin):
    '''Re-hash the working copy largefile behind `standin` and rewrite the
    standin file; abort when the largefile is missing.'''
    # hoisted: the original computed splitstandin(standin) three times
    lfile = splitstandin(standin)
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_('%s: file not found!') % lfile)
350
351
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    fctx = repo[node][standin(filename)]
    return fctx.data().strip()
355
356
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = 'x' if executable else ''
    repo.wwrite(standin, hash + '\n', flags)
359
360
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    # sha1() == sha1('') on Python 2, and the no-argument form also works
    # on Python 3 (where sha1('') raises TypeError); matches hexsha1 below
    hasher = hashlib.sha1()
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hasher.hexdigest()
368
369
def hashrepofile(repo, file):
    '''Return the SHA-1 digest of the repo-relative file's contents.'''
    return hashfile(repo.wjoin(file))
371
372
def hashfile(file):
    '''Return the hex SHA-1 digest of the file's contents, or the empty
    string when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    # sha1() == sha1('') on Python 2 and also works on Python 3,
    # where sha1('') raises TypeError; matches hexsha1 below
    hasher = hashlib.sha1()
    with open(file, 'rb') as fd:
        for data in util.filechunkiter(fd):
            hasher.update(data)
    return hasher.hexdigest()
380
381
def getexecutable(filename):
    '''Return True when the file is executable by user, group, and other.

    The original returned the raw masked int bits; callers (e.g.
    writestandin) only truth-test the result, so an explicit bool is a
    compatible, clearer return value.
    '''
    mode = os.stat(filename).st_mode
    return bool((mode & stat.S_IXUSR) and
                (mode & stat.S_IXGRP) and
                (mode & stat.S_IXOTH))
386
387
def urljoin(first, second, *arg):
    '''Join URL components, ensuring exactly one '/' between each pair.'''
    def _join(left, right):
        left = left if left.endswith('/') else left + '/'
        right = right[1:] if right.startswith('/') else right
        return left + right

    url = first
    for piece in (second,) + arg:
        url = _join(url, piece)
    return url
399
400
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hasher = hashlib.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()
407
408
def httpsendfile(ui, filename):
    '''Wrap `filename` in an httpsendfile opened for binary reading.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
410
411
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
414
415
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    # a repo counts as a largefiles repo when the requirement is set and
    # the store actually contains standin paths ...
    if ('largefiles' in repo.requirements and
        any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True

    # ... or when an existing lfdirstate has any entries
    return any(openlfdirstate(repo.ui, repo, False))
422
423
class storeprotonotcapable(Exception):
    '''Raised when no remote store supports any of the required store
    types.'''
    def __init__(self, storetypes):
        # the store type names that could not be satisfied
        self.storetypes = storetypes
426
427
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every standin tracked in
    the dirstate; hash is None when the standin cannot be read.'''
    standins = []
    matcher = getstandinmatcher(repo)
    # loop variable renamed so it no longer shadows the standin() helper
    for standinfile in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standinfile)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins
438
439
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Sync the lfdirstate entry for `lfile` with the state of its standin
    in repo.dirstate.  When `normallookup` is set, files in 'n' state are
    always re-checked on the next status run.'''
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        # renamed from "stat": the original shadowed the imported stat module
        entry = repo.dirstate._map[lfstandin]
        state, mtime = entry[0], entry[3]
    else:
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not repo.wvfs.exists(lfile)):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
461
462
def markcommitted(orig, ctx, node):
    '''Wrapper for context.markcommitted that also syncs the lfdirstate
    and copies committed largefiles into the cache.'''
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        if isstandin(f):
            synclfdirstate(repo, lfdirstate, splitstandin(f), False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    copyalltostore(repo, node)
485
486
486 def getlfilestoupdate(oldstandins, newstandins):
487 def getlfilestoupdate(oldstandins, newstandins):
487 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
488 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
488 filelist = []
489 filelist = []
489 for f in changedstandins:
490 for f in changedstandins:
490 if f[0] not in filelist:
491 if f[0] not in filelist:
491 filelist.append(f[0])
492 filelist.append(f[0])
492 return filelist
493 return filelist
493
494
494 def getlfilestoupload(repo, missing, addfunc):
495 def getlfilestoupload(repo, missing, addfunc):
495 for i, n in enumerate(missing):
496 for i, n in enumerate(missing):
496 repo.ui.progress(_('finding outgoing largefiles'), i,
497 repo.ui.progress(_('finding outgoing largefiles'), i,
497 unit=_('revisions'), total=len(missing))
498 unit=_('revisions'), total=len(missing))
498 parents = [p for p in repo[n].parents() if p != node.nullid]
499 parents = [p for p in repo[n].parents() if p != node.nullid]
499
500
500 oldlfstatus = repo.lfstatus
501 oldlfstatus = repo.lfstatus
501 repo.lfstatus = False
502 repo.lfstatus = False
502 try:
503 try:
503 ctx = repo[n]
504 ctx = repo[n]
504 finally:
505 finally:
505 repo.lfstatus = oldlfstatus
506 repo.lfstatus = oldlfstatus
506
507
507 files = set(ctx.files())
508 files = set(ctx.files())
508 if len(parents) == 2:
509 if len(parents) == 2:
509 mc = ctx.manifest()
510 mc = ctx.manifest()
510 mp1 = ctx.parents()[0].manifest()
511 mp1 = ctx.parents()[0].manifest()
511 mp2 = ctx.parents()[1].manifest()
512 mp2 = ctx.parents()[1].manifest()
512 for f in mp1:
513 for f in mp1:
513 if f not in mc:
514 if f not in mc:
514 files.add(f)
515 files.add(f)
515 for f in mp2:
516 for f in mp2:
516 if f not in mc:
517 if f not in mc:
517 files.add(f)
518 files.add(f)
518 for f in mc:
519 for f in mc:
519 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
520 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
520 files.add(f)
521 files.add(f)
521 for fn in files:
522 for fn in files:
522 if isstandin(fn) and fn in ctx:
523 if isstandin(fn) and fn in ctx:
523 addfunc(fn, ctx[fn].data().strip())
524 addfunc(fn, ctx[fn].data().strip())
524 repo.ui.progress(_('finding outgoing largefiles'), None)
525 repo.ui.progress(_('finding outgoing largefiles'), None)
525
526
526 def updatestandinsbymatch(repo, match):
527 def updatestandinsbymatch(repo, match):
527 '''Update standins in the working directory according to specified match
528 '''Update standins in the working directory according to specified match
528
529
529 This returns (possibly modified) ``match`` object to be used for
530 This returns (possibly modified) ``match`` object to be used for
530 subsequent commit process.
531 subsequent commit process.
531 '''
532 '''
532
533
533 ui = repo.ui
534 ui = repo.ui
534
535
535 # Case 1: user calls commit with no specific files or
536 # Case 1: user calls commit with no specific files or
536 # include/exclude patterns: refresh and commit all files that
537 # include/exclude patterns: refresh and commit all files that
537 # are "dirty".
538 # are "dirty".
538 if match is None or match.always():
539 if match is None or match.always():
539 # Spend a bit of time here to get a list of files we know
540 # Spend a bit of time here to get a list of files we know
540 # are modified so we can compare only against those.
541 # are modified so we can compare only against those.
541 # It can cost a lot of time (several seconds)
542 # It can cost a lot of time (several seconds)
542 # otherwise to update all standins if the largefiles are
543 # otherwise to update all standins if the largefiles are
543 # large.
544 # large.
544 lfdirstate = openlfdirstate(ui, repo)
545 lfdirstate = openlfdirstate(ui, repo)
545 dirtymatch = matchmod.always(repo.root, repo.getcwd())
546 dirtymatch = matchmod.always(repo.root, repo.getcwd())
546 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
547 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
547 False)
548 False)
548 modifiedfiles = unsure + s.modified + s.added + s.removed
549 modifiedfiles = unsure + s.modified + s.added + s.removed
549 lfiles = listlfiles(repo)
550 lfiles = listlfiles(repo)
550 # this only loops through largefiles that exist (not
551 # this only loops through largefiles that exist (not
551 # removed/renamed)
552 # removed/renamed)
552 for lfile in lfiles:
553 for lfile in lfiles:
553 if lfile in modifiedfiles:
554 if lfile in modifiedfiles:
554 if repo.wvfs.exists(standin(lfile)):
555 if repo.wvfs.exists(standin(lfile)):
555 # this handles the case where a rebase is being
556 # this handles the case where a rebase is being
556 # performed and the working copy is not updated
557 # performed and the working copy is not updated
557 # yet.
558 # yet.
558 if repo.wvfs.exists(lfile):
559 if repo.wvfs.exists(lfile):
559 updatestandin(repo,
560 updatestandin(repo,
560 standin(lfile))
561 standin(lfile))
561
562
562 return match
563 return match
563
564
564 lfiles = listlfiles(repo)
565 lfiles = listlfiles(repo)
565 match._files = repo._subdirlfs(match.files(), lfiles)
566 match._files = repo._subdirlfs(match.files(), lfiles)
566
567
567 # Case 2: user calls commit with specified patterns: refresh
568 # Case 2: user calls commit with specified patterns: refresh
568 # any matching big files.
569 # any matching big files.
569 smatcher = composestandinmatcher(repo, match)
570 smatcher = composestandinmatcher(repo, match)
570 standins = repo.dirstate.walk(smatcher, [], False, False)
571 standins = repo.dirstate.walk(smatcher, [], False, False)
571
572
572 # No matching big files: get out of the way and pass control to
573 # No matching big files: get out of the way and pass control to
573 # the usual commit() method.
574 # the usual commit() method.
574 if not standins:
575 if not standins:
575 return match
576 return match
576
577
577 # Refresh all matching big files. It's possible that the
578 # Refresh all matching big files. It's possible that the
578 # commit will end up failing, in which case the big files will
579 # commit will end up failing, in which case the big files will
579 # stay refreshed. No harm done: the user modified them and
580 # stay refreshed. No harm done: the user modified them and
580 # asked to commit them, so sooner or later we're going to
581 # asked to commit them, so sooner or later we're going to
581 # refresh the standins. Might as well leave them refreshed.
582 # refresh the standins. Might as well leave them refreshed.
582 lfdirstate = openlfdirstate(ui, repo)
583 lfdirstate = openlfdirstate(ui, repo)
583 for fstandin in standins:
584 for fstandin in standins:
584 lfile = splitstandin(fstandin)
585 lfile = splitstandin(fstandin)
585 if lfdirstate[lfile] != 'r':
586 if lfdirstate[lfile] != 'r':
586 updatestandin(repo, fstandin)
587 updatestandin(repo, fstandin)
587
588
588 # Cook up a new matcher that only matches regular files or
589 # Cook up a new matcher that only matches regular files or
589 # standins corresponding to the big files requested by the
590 # standins corresponding to the big files requested by the
590 # user. Have to modify _files to prevent commit() from
591 # user. Have to modify _files to prevent commit() from
591 # complaining "not tracked" for big files.
592 # complaining "not tracked" for big files.
592 match = copy.copy(match)
593 match = copy.copy(match)
593 origmatchfn = match.matchfn
594 origmatchfn = match.matchfn
594
595
595 # Check both the list of largefiles and the list of
596 # Check both the list of largefiles and the list of
596 # standins because if a largefile was removed, it
597 # standins because if a largefile was removed, it
597 # won't be in the list of largefiles at this point
598 # won't be in the list of largefiles at this point
598 match._files += sorted(standins)
599 match._files += sorted(standins)
599
600
600 actualfiles = []
601 actualfiles = []
601 for f in match._files:
602 for f in match._files:
602 fstandin = standin(f)
603 fstandin = standin(f)
603
604
604 # For largefiles, only one of the normal and standin should be
605 # For largefiles, only one of the normal and standin should be
605 # committed (except if one of them is a remove). In the case of a
606 # committed (except if one of them is a remove). In the case of a
606 # standin removal, drop the normal file if it is unknown to dirstate.
607 # standin removal, drop the normal file if it is unknown to dirstate.
607 # Thus, skip plain largefile names but keep the standin.
608 # Thus, skip plain largefile names but keep the standin.
608 if f in lfiles or fstandin in standins:
609 if f in lfiles or fstandin in standins:
609 if repo.dirstate[fstandin] != 'r':
610 if repo.dirstate[fstandin] != 'r':
610 if repo.dirstate[f] != 'r':
611 if repo.dirstate[f] != 'r':
611 continue
612 continue
612 elif repo.dirstate[f] == '?':
613 elif repo.dirstate[f] == '?':
613 continue
614 continue
614
615
615 actualfiles.append(f)
616 actualfiles.append(f)
616 match._files = actualfiles
617 match._files = actualfiles
617
618
618 def matchfn(f):
619 def matchfn(f):
619 if origmatchfn(f):
620 if origmatchfn(f):
620 return f not in lfiles
621 return f not in lfiles
621 else:
622 else:
622 return f in standins
623 return f in standins
623
624
624 match.matchfn = matchfn
625 match.matchfn = matchfn
625
626
626 return match
627 return match
627
628
628 class automatedcommithook(object):
629 class automatedcommithook(object):
629 '''Stateful hook to update standins at the 1st commit of resuming
630 '''Stateful hook to update standins at the 1st commit of resuming
630
631
631 For efficiency, updating standins in the working directory should
632 For efficiency, updating standins in the working directory should
632 be avoided while automated committing (like rebase, transplant and
633 be avoided while automated committing (like rebase, transplant and
633 so on), because they should be updated before committing.
634 so on), because they should be updated before committing.
634
635
635 But the 1st commit of resuming automated committing (e.g. ``rebase
636 But the 1st commit of resuming automated committing (e.g. ``rebase
636 --continue``) should update them, because largefiles may be
637 --continue``) should update them, because largefiles may be
637 modified manually.
638 modified manually.
638 '''
639 '''
639 def __init__(self, resuming):
640 def __init__(self, resuming):
640 self.resuming = resuming
641 self.resuming = resuming
641
642
642 def __call__(self, repo, match):
643 def __call__(self, repo, match):
643 if self.resuming:
644 if self.resuming:
644 self.resuming = False # avoids updating at subsequent commits
645 self.resuming = False # avoids updating at subsequent commits
645 return updatestandinsbymatch(repo, match)
646 return updatestandinsbymatch(repo, match)
646 else:
647 else:
647 return match
648 return match
648
649
649 def getstatuswriter(ui, repo, forcibly=None):
650 def getstatuswriter(ui, repo, forcibly=None):
650 '''Return the function to write largefiles specific status out
651 '''Return the function to write largefiles specific status out
651
652
652 If ``forcibly`` is ``None``, this returns the last element of
653 If ``forcibly`` is ``None``, this returns the last element of
653 ``repo._lfstatuswriters`` as "default" writer function.
654 ``repo._lfstatuswriters`` as "default" writer function.
654
655
655 Otherwise, this returns the function to always write out (or
656 Otherwise, this returns the function to always write out (or
656 ignore if ``not forcibly``) status.
657 ignore if ``not forcibly``) status.
657 '''
658 '''
658 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
659 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
659 return repo._lfstatuswriters[-1]
660 return repo._lfstatuswriters[-1]
660 else:
661 else:
661 if forcibly:
662 if forcibly:
662 return ui.status # forcibly WRITE OUT
663 return ui.status # forcibly WRITE OUT
663 else:
664 else:
664 return lambda *msg, **opts: None # forcibly IGNORE
665 return lambda *msg, **opts: None # forcibly IGNORE
@@ -1,176 +1,176 b''
1 # profiling.py - profiling functions
1 # profiling.py - profiling functions
2 #
2 #
3 # Copyright 2016 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2016 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import, print_function
8 from __future__ import absolute_import, print_function
9
9
10 import contextlib
10 import contextlib
11 import os
12 import time
11 import time
13
12
14 from .i18n import _
13 from .i18n import _
15 from . import (
14 from . import (
16 error,
15 error,
16 pycompat,
17 util,
17 util,
18 )
18 )
19
19
20 @contextlib.contextmanager
20 @contextlib.contextmanager
21 def lsprofile(ui, fp):
21 def lsprofile(ui, fp):
22 format = ui.config('profiling', 'format', default='text')
22 format = ui.config('profiling', 'format', default='text')
23 field = ui.config('profiling', 'sort', default='inlinetime')
23 field = ui.config('profiling', 'sort', default='inlinetime')
24 limit = ui.configint('profiling', 'limit', default=30)
24 limit = ui.configint('profiling', 'limit', default=30)
25 climit = ui.configint('profiling', 'nested', default=0)
25 climit = ui.configint('profiling', 'nested', default=0)
26
26
27 if format not in ['text', 'kcachegrind']:
27 if format not in ['text', 'kcachegrind']:
28 ui.warn(_("unrecognized profiling format '%s'"
28 ui.warn(_("unrecognized profiling format '%s'"
29 " - Ignored\n") % format)
29 " - Ignored\n") % format)
30 format = 'text'
30 format = 'text'
31
31
32 try:
32 try:
33 from . import lsprof
33 from . import lsprof
34 except ImportError:
34 except ImportError:
35 raise error.Abort(_(
35 raise error.Abort(_(
36 'lsprof not available - install from '
36 'lsprof not available - install from '
37 'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'))
37 'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'))
38 p = lsprof.Profiler()
38 p = lsprof.Profiler()
39 p.enable(subcalls=True)
39 p.enable(subcalls=True)
40 try:
40 try:
41 yield
41 yield
42 finally:
42 finally:
43 p.disable()
43 p.disable()
44
44
45 if format == 'kcachegrind':
45 if format == 'kcachegrind':
46 from . import lsprofcalltree
46 from . import lsprofcalltree
47 calltree = lsprofcalltree.KCacheGrind(p)
47 calltree = lsprofcalltree.KCacheGrind(p)
48 calltree.output(fp)
48 calltree.output(fp)
49 else:
49 else:
50 # format == 'text'
50 # format == 'text'
51 stats = lsprof.Stats(p.getstats())
51 stats = lsprof.Stats(p.getstats())
52 stats.sort(field)
52 stats.sort(field)
53 stats.pprint(limit=limit, file=fp, climit=climit)
53 stats.pprint(limit=limit, file=fp, climit=climit)
54
54
55 @contextlib.contextmanager
55 @contextlib.contextmanager
56 def flameprofile(ui, fp):
56 def flameprofile(ui, fp):
57 try:
57 try:
58 from flamegraph import flamegraph
58 from flamegraph import flamegraph
59 except ImportError:
59 except ImportError:
60 raise error.Abort(_(
60 raise error.Abort(_(
61 'flamegraph not available - install from '
61 'flamegraph not available - install from '
62 'https://github.com/evanhempel/python-flamegraph'))
62 'https://github.com/evanhempel/python-flamegraph'))
63 # developer config: profiling.freq
63 # developer config: profiling.freq
64 freq = ui.configint('profiling', 'freq', default=1000)
64 freq = ui.configint('profiling', 'freq', default=1000)
65 filter_ = None
65 filter_ = None
66 collapse_recursion = True
66 collapse_recursion = True
67 thread = flamegraph.ProfileThread(fp, 1.0 / freq,
67 thread = flamegraph.ProfileThread(fp, 1.0 / freq,
68 filter_, collapse_recursion)
68 filter_, collapse_recursion)
69 start_time = time.clock()
69 start_time = time.clock()
70 try:
70 try:
71 thread.start()
71 thread.start()
72 yield
72 yield
73 finally:
73 finally:
74 thread.stop()
74 thread.stop()
75 thread.join()
75 thread.join()
76 print('Collected %d stack frames (%d unique) in %2.2f seconds.' % (
76 print('Collected %d stack frames (%d unique) in %2.2f seconds.' % (
77 time.clock() - start_time, thread.num_frames(),
77 time.clock() - start_time, thread.num_frames(),
78 thread.num_frames(unique=True)))
78 thread.num_frames(unique=True)))
79
79
80 @contextlib.contextmanager
80 @contextlib.contextmanager
81 def statprofile(ui, fp):
81 def statprofile(ui, fp):
82 from . import statprof
82 from . import statprof
83
83
84 freq = ui.configint('profiling', 'freq', default=1000)
84 freq = ui.configint('profiling', 'freq', default=1000)
85 if freq > 0:
85 if freq > 0:
86 # Cannot reset when profiler is already active. So silently no-op.
86 # Cannot reset when profiler is already active. So silently no-op.
87 if statprof.state.profile_level == 0:
87 if statprof.state.profile_level == 0:
88 statprof.reset(freq)
88 statprof.reset(freq)
89 else:
89 else:
90 ui.warn(_("invalid sampling frequency '%s' - ignoring\n") % freq)
90 ui.warn(_("invalid sampling frequency '%s' - ignoring\n") % freq)
91
91
92 statprof.start(mechanism='thread')
92 statprof.start(mechanism='thread')
93
93
94 try:
94 try:
95 yield
95 yield
96 finally:
96 finally:
97 data = statprof.stop()
97 data = statprof.stop()
98
98
99 profformat = ui.config('profiling', 'statformat', 'hotpath')
99 profformat = ui.config('profiling', 'statformat', 'hotpath')
100
100
101 formats = {
101 formats = {
102 'byline': statprof.DisplayFormats.ByLine,
102 'byline': statprof.DisplayFormats.ByLine,
103 'bymethod': statprof.DisplayFormats.ByMethod,
103 'bymethod': statprof.DisplayFormats.ByMethod,
104 'hotpath': statprof.DisplayFormats.Hotpath,
104 'hotpath': statprof.DisplayFormats.Hotpath,
105 'json': statprof.DisplayFormats.Json,
105 'json': statprof.DisplayFormats.Json,
106 }
106 }
107
107
108 if profformat in formats:
108 if profformat in formats:
109 displayformat = formats[profformat]
109 displayformat = formats[profformat]
110 else:
110 else:
111 ui.warn(_('unknown profiler output format: %s\n') % profformat)
111 ui.warn(_('unknown profiler output format: %s\n') % profformat)
112 displayformat = statprof.DisplayFormats.Hotpath
112 displayformat = statprof.DisplayFormats.Hotpath
113
113
114 statprof.display(fp, data=data, format=displayformat)
114 statprof.display(fp, data=data, format=displayformat)
115
115
116 @contextlib.contextmanager
116 @contextlib.contextmanager
117 def profile(ui):
117 def profile(ui):
118 """Start profiling.
118 """Start profiling.
119
119
120 Profiling is active when the context manager is active. When the context
120 Profiling is active when the context manager is active. When the context
121 manager exits, profiling results will be written to the configured output.
121 manager exits, profiling results will be written to the configured output.
122 """
122 """
123 profiler = os.getenv('HGPROF')
123 profiler = pycompat.osgetenv('HGPROF')
124 if profiler is None:
124 if profiler is None:
125 profiler = ui.config('profiling', 'type', default='stat')
125 profiler = ui.config('profiling', 'type', default='stat')
126 if profiler not in ('ls', 'stat', 'flame'):
126 if profiler not in ('ls', 'stat', 'flame'):
127 ui.warn(_("unrecognized profiler '%s' - ignored\n") % profiler)
127 ui.warn(_("unrecognized profiler '%s' - ignored\n") % profiler)
128 profiler = 'stat'
128 profiler = 'stat'
129
129
130 output = ui.config('profiling', 'output')
130 output = ui.config('profiling', 'output')
131
131
132 if output == 'blackbox':
132 if output == 'blackbox':
133 fp = util.stringio()
133 fp = util.stringio()
134 elif output:
134 elif output:
135 path = ui.expandpath(output)
135 path = ui.expandpath(output)
136 fp = open(path, 'wb')
136 fp = open(path, 'wb')
137 else:
137 else:
138 fp = ui.ferr
138 fp = ui.ferr
139
139
140 try:
140 try:
141 if profiler == 'ls':
141 if profiler == 'ls':
142 proffn = lsprofile
142 proffn = lsprofile
143 elif profiler == 'flame':
143 elif profiler == 'flame':
144 proffn = flameprofile
144 proffn = flameprofile
145 else:
145 else:
146 proffn = statprofile
146 proffn = statprofile
147
147
148 with proffn(ui, fp):
148 with proffn(ui, fp):
149 yield
149 yield
150
150
151 finally:
151 finally:
152 if output:
152 if output:
153 if output == 'blackbox':
153 if output == 'blackbox':
154 val = 'Profile:\n%s' % fp.getvalue()
154 val = 'Profile:\n%s' % fp.getvalue()
155 # ui.log treats the input as a format string,
155 # ui.log treats the input as a format string,
156 # so we need to escape any % signs.
156 # so we need to escape any % signs.
157 val = val.replace('%', '%%')
157 val = val.replace('%', '%%')
158 ui.log('profile', val)
158 ui.log('profile', val)
159 fp.close()
159 fp.close()
160
160
161 @contextlib.contextmanager
161 @contextlib.contextmanager
162 def maybeprofile(ui):
162 def maybeprofile(ui):
163 """Profile if enabled, else do nothing.
163 """Profile if enabled, else do nothing.
164
164
165 This context manager can be used to optionally profile if profiling
165 This context manager can be used to optionally profile if profiling
166 is enabled. Otherwise, it does nothing.
166 is enabled. Otherwise, it does nothing.
167
167
168 The purpose of this context manager is to make calling code simpler:
168 The purpose of this context manager is to make calling code simpler:
169 just use a single code path for calling into code you may want to profile
169 just use a single code path for calling into code you may want to profile
170 and this function determines whether to start profiling.
170 and this function determines whether to start profiling.
171 """
171 """
172 if ui.configbool('profiling', 'enabled'):
172 if ui.configbool('profiling', 'enabled'):
173 with profile(ui):
173 with profile(ui):
174 yield
174 yield
175 else:
175 else:
176 yield
176 yield
@@ -1,479 +1,481 b''
1 # url.py - HTTP handling for mercurial
1 # url.py - HTTP handling for mercurial
2 #
2 #
3 # Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
4 # Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import base64
12 import base64
13 import os
13 import os
14 import socket
14 import socket
15
15
16 from .i18n import _
16 from .i18n import _
17 from . import (
17 from . import (
18 error,
18 error,
19 httpconnection as httpconnectionmod,
19 httpconnection as httpconnectionmod,
20 keepalive,
20 keepalive,
21 pycompat,
21 sslutil,
22 sslutil,
22 util,
23 util,
23 )
24 )
24
25
25 httplib = util.httplib
26 httplib = util.httplib
26 stringio = util.stringio
27 stringio = util.stringio
27 urlerr = util.urlerr
28 urlerr = util.urlerr
28 urlreq = util.urlreq
29 urlreq = util.urlreq
29
30
30 class passwordmgr(object):
31 class passwordmgr(object):
31 def __init__(self, ui, passwddb):
32 def __init__(self, ui, passwddb):
32 self.ui = ui
33 self.ui = ui
33 self.passwddb = passwddb
34 self.passwddb = passwddb
34
35
35 def add_password(self, realm, uri, user, passwd):
36 def add_password(self, realm, uri, user, passwd):
36 return self.passwddb.add_password(realm, uri, user, passwd)
37 return self.passwddb.add_password(realm, uri, user, passwd)
37
38
38 def find_user_password(self, realm, authuri):
39 def find_user_password(self, realm, authuri):
39 authinfo = self.passwddb.find_user_password(realm, authuri)
40 authinfo = self.passwddb.find_user_password(realm, authuri)
40 user, passwd = authinfo
41 user, passwd = authinfo
41 if user and passwd:
42 if user and passwd:
42 self._writedebug(user, passwd)
43 self._writedebug(user, passwd)
43 return (user, passwd)
44 return (user, passwd)
44
45
45 if not user or not passwd:
46 if not user or not passwd:
46 res = httpconnectionmod.readauthforuri(self.ui, authuri, user)
47 res = httpconnectionmod.readauthforuri(self.ui, authuri, user)
47 if res:
48 if res:
48 group, auth = res
49 group, auth = res
49 user, passwd = auth.get('username'), auth.get('password')
50 user, passwd = auth.get('username'), auth.get('password')
50 self.ui.debug("using auth.%s.* for authentication\n" % group)
51 self.ui.debug("using auth.%s.* for authentication\n" % group)
51 if not user or not passwd:
52 if not user or not passwd:
52 u = util.url(authuri)
53 u = util.url(authuri)
53 u.query = None
54 u.query = None
54 if not self.ui.interactive():
55 if not self.ui.interactive():
55 raise error.Abort(_('http authorization required for %s') %
56 raise error.Abort(_('http authorization required for %s') %
56 util.hidepassword(str(u)))
57 util.hidepassword(str(u)))
57
58
58 self.ui.write(_("http authorization required for %s\n") %
59 self.ui.write(_("http authorization required for %s\n") %
59 util.hidepassword(str(u)))
60 util.hidepassword(str(u)))
60 self.ui.write(_("realm: %s\n") % realm)
61 self.ui.write(_("realm: %s\n") % realm)
61 if user:
62 if user:
62 self.ui.write(_("user: %s\n") % user)
63 self.ui.write(_("user: %s\n") % user)
63 else:
64 else:
64 user = self.ui.prompt(_("user:"), default=None)
65 user = self.ui.prompt(_("user:"), default=None)
65
66
66 if not passwd:
67 if not passwd:
67 passwd = self.ui.getpass()
68 passwd = self.ui.getpass()
68
69
69 self.passwddb.add_password(realm, authuri, user, passwd)
70 self.passwddb.add_password(realm, authuri, user, passwd)
70 self._writedebug(user, passwd)
71 self._writedebug(user, passwd)
71 return (user, passwd)
72 return (user, passwd)
72
73
73 def _writedebug(self, user, passwd):
74 def _writedebug(self, user, passwd):
74 msg = _('http auth: user %s, password %s\n')
75 msg = _('http auth: user %s, password %s\n')
75 self.ui.debug(msg % (user, passwd and '*' * len(passwd) or 'not set'))
76 self.ui.debug(msg % (user, passwd and '*' * len(passwd) or 'not set'))
76
77
77 def find_stored_password(self, authuri):
78 def find_stored_password(self, authuri):
78 return self.passwddb.find_user_password(None, authuri)
79 return self.passwddb.find_user_password(None, authuri)
79
80
80 class proxyhandler(urlreq.proxyhandler):
81 class proxyhandler(urlreq.proxyhandler):
81 def __init__(self, ui):
82 def __init__(self, ui):
82 proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
83 proxyurl = (ui.config("http_proxy", "host") or
84 pycompat.osgetenv('http_proxy'))
83 # XXX proxyauthinfo = None
85 # XXX proxyauthinfo = None
84
86
85 if proxyurl:
87 if proxyurl:
86 # proxy can be proper url or host[:port]
88 # proxy can be proper url or host[:port]
87 if not (proxyurl.startswith('http:') or
89 if not (proxyurl.startswith('http:') or
88 proxyurl.startswith('https:')):
90 proxyurl.startswith('https:')):
89 proxyurl = 'http://' + proxyurl + '/'
91 proxyurl = 'http://' + proxyurl + '/'
90 proxy = util.url(proxyurl)
92 proxy = util.url(proxyurl)
91 if not proxy.user:
93 if not proxy.user:
92 proxy.user = ui.config("http_proxy", "user")
94 proxy.user = ui.config("http_proxy", "user")
93 proxy.passwd = ui.config("http_proxy", "passwd")
95 proxy.passwd = ui.config("http_proxy", "passwd")
94
96
95 # see if we should use a proxy for this url
97 # see if we should use a proxy for this url
96 no_list = ["localhost", "127.0.0.1"]
98 no_list = ["localhost", "127.0.0.1"]
97 no_list.extend([p.lower() for
99 no_list.extend([p.lower() for
98 p in ui.configlist("http_proxy", "no")])
100 p in ui.configlist("http_proxy", "no")])
99 no_list.extend([p.strip().lower() for
101 no_list.extend([p.strip().lower() for
100 p in os.getenv("no_proxy", '').split(',')
102 p in pycompat.osgetenv("no_proxy", '').split(',')
101 if p.strip()])
103 if p.strip()])
102 # "http_proxy.always" config is for running tests on localhost
104 # "http_proxy.always" config is for running tests on localhost
103 if ui.configbool("http_proxy", "always"):
105 if ui.configbool("http_proxy", "always"):
104 self.no_list = []
106 self.no_list = []
105 else:
107 else:
106 self.no_list = no_list
108 self.no_list = no_list
107
109
108 proxyurl = str(proxy)
110 proxyurl = str(proxy)
109 proxies = {'http': proxyurl, 'https': proxyurl}
111 proxies = {'http': proxyurl, 'https': proxyurl}
110 ui.debug('proxying through http://%s:%s\n' %
112 ui.debug('proxying through http://%s:%s\n' %
111 (proxy.host, proxy.port))
113 (proxy.host, proxy.port))
112 else:
114 else:
113 proxies = {}
115 proxies = {}
114
116
115 urlreq.proxyhandler.__init__(self, proxies)
117 urlreq.proxyhandler.__init__(self, proxies)
116 self.ui = ui
118 self.ui = ui
117
119
118 def proxy_open(self, req, proxy, type_):
120 def proxy_open(self, req, proxy, type_):
119 host = req.get_host().split(':')[0]
121 host = req.get_host().split(':')[0]
120 for e in self.no_list:
122 for e in self.no_list:
121 if host == e:
123 if host == e:
122 return None
124 return None
123 if e.startswith('*.') and host.endswith(e[2:]):
125 if e.startswith('*.') and host.endswith(e[2:]):
124 return None
126 return None
125 if e.startswith('.') and host.endswith(e[1:]):
127 if e.startswith('.') and host.endswith(e[1:]):
126 return None
128 return None
127
129
128 return urlreq.proxyhandler.proxy_open(self, req, proxy, type_)
130 return urlreq.proxyhandler.proxy_open(self, req, proxy, type_)
129
131
130 def _gen_sendfile(orgsend):
132 def _gen_sendfile(orgsend):
131 def _sendfile(self, data):
133 def _sendfile(self, data):
132 # send a file
134 # send a file
133 if isinstance(data, httpconnectionmod.httpsendfile):
135 if isinstance(data, httpconnectionmod.httpsendfile):
134 # if auth required, some data sent twice, so rewind here
136 # if auth required, some data sent twice, so rewind here
135 data.seek(0)
137 data.seek(0)
136 for chunk in util.filechunkiter(data):
138 for chunk in util.filechunkiter(data):
137 orgsend(self, chunk)
139 orgsend(self, chunk)
138 else:
140 else:
139 orgsend(self, data)
141 orgsend(self, data)
140 return _sendfile
142 return _sendfile
141
143
# HTTPS support is optional: only wire up an https handler below when
# urlreq actually provides one
has_https = util.safehasattr(urlreq, 'httpshandler')
143
145
class httpconnection(keepalive.HTTPConnection):
    """Keep-alive HTTP connection able to stream large request bodies."""
    # must be able to send big bundle as stream.
    send = _gen_sendfile(keepalive.HTTPConnection.send)

    def getresponse(self):
        # a response buffered by the proxy CONNECT handshake takes
        # precedence; it is handed out exactly once
        res = getattr(self, 'proxyres', None)
        if not res:
            return keepalive.HTTPConnection.getresponse(self)
        if res.will_close:
            # the stored response says the connection will close
            self.close()
        self.proxyres = None
        return res
156
158
157 # general transaction handler to support different ways to handle
159 # general transaction handler to support different ways to handle
158 # HTTPS proxying before and after Python 2.6.3.
160 # HTTPS proxying before and after Python 2.6.3.
159 def _generic_start_transaction(handler, h, req):
161 def _generic_start_transaction(handler, h, req):
160 tunnel_host = getattr(req, '_tunnel_host', None)
162 tunnel_host = getattr(req, '_tunnel_host', None)
161 if tunnel_host:
163 if tunnel_host:
162 if tunnel_host[:7] not in ['http://', 'https:/']:
164 if tunnel_host[:7] not in ['http://', 'https:/']:
163 tunnel_host = 'https://' + tunnel_host
165 tunnel_host = 'https://' + tunnel_host
164 new_tunnel = True
166 new_tunnel = True
165 else:
167 else:
166 tunnel_host = req.get_selector()
168 tunnel_host = req.get_selector()
167 new_tunnel = False
169 new_tunnel = False
168
170
169 if new_tunnel or tunnel_host == req.get_full_url(): # has proxy
171 if new_tunnel or tunnel_host == req.get_full_url(): # has proxy
170 u = util.url(tunnel_host)
172 u = util.url(tunnel_host)
171 if new_tunnel or u.scheme == 'https': # only use CONNECT for HTTPS
173 if new_tunnel or u.scheme == 'https': # only use CONNECT for HTTPS
172 h.realhostport = ':'.join([u.host, (u.port or '443')])
174 h.realhostport = ':'.join([u.host, (u.port or '443')])
173 h.headers = req.headers.copy()
175 h.headers = req.headers.copy()
174 h.headers.update(handler.parent.addheaders)
176 h.headers.update(handler.parent.addheaders)
175 return
177 return
176
178
177 h.realhostport = None
179 h.realhostport = None
178 h.headers = None
180 h.headers = None
179
181
def _generic_proxytunnel(self):
    """Send a CONNECT request to the proxy and parse its reply.

    Returns True when the tunnel was established (proxy answered 200).
    Otherwise the proxy's error response is parsed far enough to be
    stored as self.proxyres (later handed out by getresponse()) and
    False is returned.
    """
    # forward only the Proxy-* headers to the proxy itself
    proxyheaders = dict(
            [(x, self.headers[x]) for x in self.headers
             if x.lower().startswith('proxy-')])
    self.send('CONNECT %s HTTP/1.0\r\n' % self.realhostport)
    for header in proxyheaders.iteritems():
        self.send('%s: %s\r\n' % header)
    self.send('\r\n')

    # majority of the following code is duplicated from
    # httplib.HTTPConnection as there are no adequate places to
    # override functions to provide the needed functionality
    res = self.response_class(self.sock,
                              strict=self.strict,
                              method=self._method)

    while True:
        version, status, reason = res._read_status()
        if status != httplib.CONTINUE:
            break
        # skip lines that are all whitespace
        list(iter(lambda: res.fp.readline().strip(), ''))
    res.status = status
    res.reason = reason.strip()

    if res.status == 200:
        # tunnel established: skip lines until we find a blank line
        list(iter(res.fp.readline, '\r\n'))
        return True

    if version == 'HTTP/1.0':
        res.version = 10
    elif version.startswith('HTTP/1.'):
        res.version = 11
    elif version == 'HTTP/0.9':
        res.version = 9
    else:
        raise httplib.UnknownProtocol(version)

    if res.version == 9:
        # HTTP/0.9 has no headers; synthesize an empty message
        res.length = None
        res.chunked = 0
        res.will_close = 1
        res.msg = httplib.HTTPMessage(stringio())
        return False

    res.msg = httplib.HTTPMessage(res.fp)
    res.msg.fp = None

    # are we using the chunked-style of transfer encoding?
    trenc = res.msg.getheader('transfer-encoding')
    if trenc and trenc.lower() == "chunked":
        res.chunked = 1
        res.chunk_left = None
    else:
        res.chunked = 0

    # will the connection close at the end of the response?
    res.will_close = res._check_close()

    # do we have a Content-Length?
    # NOTE: RFC 2616, section 4.4, #3 says we ignore this if
    # transfer-encoding is "chunked"
    length = res.msg.getheader('content-length')
    if length and not res.chunked:
        try:
            res.length = int(length)
        except ValueError:
            res.length = None
        else:
            if res.length < 0: # ignore nonsensical negative lengths
                res.length = None
    else:
        res.length = None

    # does the body have a fixed length? (of zero)
    if (status == httplib.NO_CONTENT or status == httplib.NOT_MODIFIED or
        100 <= status < 200 or # 1xx codes
        res._method == 'HEAD'):
        res.length = 0

    # if the connection remains open, and we aren't using chunked, and
    # a content-length was not provided, then assume that the connection
    # WILL close.
    if (not res.will_close and
        not res.chunked and
        res.length is None):
        res.will_close = 1

    # stash the error response for getresponse() to return later
    self.proxyres = res

    return False
272
274
class httphandler(keepalive.HTTPHandler):
    """Keep-alive handler opening plain HTTP via our streaming connection."""

    def http_open(self, req):
        # use our httpconnection class so big bundles can be streamed
        return self.do_open(httpconnection, req)

    def _start_transaction(self, h, req):
        # record CONNECT-tunnel state before the base class proceeds
        _generic_start_transaction(self, h, req)
        return keepalive.HTTPHandler._start_transaction(self, h, req)
280
282
if has_https:
    class httpsconnection(httplib.HTTPConnection):
        """HTTPS connection with streaming send and optional CONNECT proxy."""
        response_class = keepalive.HTTPResponse
        default_port = httplib.HTTPS_PORT
        # must be able to send big bundle as stream.
        send = _gen_sendfile(keepalive.safesend)
        getresponse = keepalive.wrapgetresponse(httplib.HTTPConnection)

        def __init__(self, host, port=None, key_file=None, cert_file=None,
                     *args, **kwargs):
            # key/cert are stashed here and applied in connect(); the
            # plain HTTPConnection base does not know about them
            httplib.HTTPConnection.__init__(self, host, port, *args, **kwargs)
            self.key_file = key_file
            self.cert_file = cert_file

        def connect(self):
            self.sock = socket.create_connection((self.host, self.port))

            host = self.host
            if self.realhostport: # use CONNECT proxy
                # tunnel through the proxy first, then wrap the tunnel
                # socket with TLS for the real destination host
                _generic_proxytunnel(self)
                host = self.realhostport.rsplit(':', 1)[0]
            self.sock = sslutil.wrapsocket(
                self.sock, self.key_file, self.cert_file, ui=self.ui,
                serverhostname=host)
            sslutil.validatesocket(self.sock)

    class httpshandler(keepalive.KeepAliveHandler, urlreq.httpshandler):
        """Keep-alive HTTPS handler that applies [auth] config to requests."""

        def __init__(self, ui):
            keepalive.KeepAliveHandler.__init__(self)
            urlreq.httpshandler.__init__(self)
            self.ui = ui
            self.pwmgr = passwordmgr(self.ui,
                                     self.ui.httppasswordmgrdb)

        def _start_transaction(self, h, req):
            # record CONNECT-tunnel state before the base class proceeds
            _generic_start_transaction(self, h, req)
            return keepalive.KeepAliveHandler._start_transaction(self, h, req)

        def https_open(self, req):
            # req.get_full_url() does not contain credentials and we may
            # need them to match the certificates.
            url = req.get_full_url()
            user, password = self.pwmgr.find_stored_password(url)
            res = httpconnectionmod.readauthforuri(self.ui, url, user)
            if res:
                group, auth = res
                self.auth = auth
                self.ui.debug("using auth.%s.* for authentication\n" % group)
            else:
                self.auth = None
            return self.do_open(self._makeconnection, req)

        def _makeconnection(self, host, port=None, *args, **kwargs):
            keyfile = None
            certfile = None

            # positional key/cert may be passed through by urllib
            if len(args) >= 1: # key_file
                keyfile = args[0]
            if len(args) >= 2: # cert_file
                certfile = args[1]
            args = args[2:]

            # if the user has specified different key/cert files in
            # hgrc, we prefer these
            if self.auth and 'key' in self.auth and 'cert' in self.auth:
                keyfile = self.auth['key']
                certfile = self.auth['cert']

            conn = httpsconnection(host, port, keyfile, certfile, *args,
                                   **kwargs)
            conn.ui = self.ui
            return conn
353
355
class httpdigestauthhandler(urlreq.httpdigestauthhandler):
    """Digest auth handler whose retry counter resets once per request.

    Python 2.6.5 calls reset_retry_count() on 401/407 errors, which
    would loop forever; we disable it and reset the counter ourselves
    in http_error_auth_reqed.
    """
    def __init__(self, *args, **kwargs):
        urlreq.httpdigestauthhandler.__init__(self, *args, **kwargs)
        self.retried_req = None

    def reset_retry_count(self):
        # deliberately a no-op; see class docstring
        pass

    def http_error_auth_reqed(self, auth_header, host, req, headers):
        if req is not self.retried_req:
            # first auth failure for this request: start counting anew
            self.retried_req = req
            self.retried = 0
        return urlreq.httpdigestauthhandler.http_error_auth_reqed(
            self, auth_header, host, req, headers)
372
374
class httpbasicauthhandler(urlreq.httpbasicauthhandler):
    """Basic auth handler that remembers credentials between requests
    and resets its retry counter once per request.

    Python 2.6.5 calls reset_retry_count() on 401/407 errors, which
    would loop forever; we disable it and reset the counter ourselves
    in http_error_auth_reqed.
    """
    def __init__(self, *args, **kwargs):
        self.auth = None
        urlreq.httpbasicauthhandler.__init__(self, *args, **kwargs)
        self.retried_req = None

    def http_request(self, request):
        # preemptively attach the last-known-good credentials
        if self.auth:
            request.add_unredirected_header(self.auth_header, self.auth)
        return request

    def https_request(self, request):
        if self.auth:
            request.add_unredirected_header(self.auth_header, self.auth)
        return request

    def reset_retry_count(self):
        # deliberately a no-op; see class docstring
        pass

    def http_error_auth_reqed(self, auth_header, host, req, headers):
        if req is not self.retried_req:
            # first auth failure for this request: start counting anew
            self.retried_req = req
            self.retried = 0
        return urlreq.httpbasicauthhandler.http_error_auth_reqed(
            self, auth_header, host, req, headers)

    def retry_http_basic_auth(self, host, req, realm):
        user, pw = self.passwd.find_user_password(realm, req.get_full_url())
        if pw is None:
            return None
        raw = "%s:%s" % (user, pw)
        auth = 'Basic %s' % base64.b64encode(raw).strip()
        if req.get_header(self.auth_header, None) == auth:
            # the same credentials already failed once: give up
            return None
        self.auth = auth
        req.add_unredirected_header(self.auth_header, auth)
        return self.parent.open(req)
417
419
# extension hook: each entry is called as func(ui, passwordmgr) by
# opener() and the resulting handlers are added to the opener
handlerfuncs = []
419
421
def opener(ui, authinfo=None):
    """Construct an opener suitable for urllib2.

    *authinfo*, when given, is a (realm, uris, user, passwd) tuple whose
    credentials are registered with the password manager.
    """
    # experimental config: ui.usehttp2
    if ui.configbool('ui', 'usehttp2', False):
        pwmgr = passwordmgr(ui, ui.httppasswordmgrdb)
        handlers = [httpconnectionmod.http2handler(ui, pwmgr)]
    else:
        handlers = [httphandler()]
        if has_https:
            handlers.append(httpshandler(ui))

    handlers.append(proxyhandler(ui))

    passmgr = passwordmgr(ui, ui.httppasswordmgrdb)
    if authinfo is not None:
        realm, uris, user, passwd = authinfo
        saveduser, savedpass = passmgr.find_stored_password(uris[0])
        # only (re-)register when the user changed or a password was given
        if user != saveduser or passwd:
            passmgr.add_password(realm, uris, user, passwd)
        ui.debug('http auth: user %s, password %s\n' %
                 (user, passwd and '*' * len(passwd) or 'not set'))

    handlers.append(httpbasicauthhandler(passmgr))
    handlers.append(httpdigestauthhandler(passmgr))
    handlers.extend(h(ui, passmgr) for h in handlerfuncs)
    urlopener = urlreq.buildopener(*handlers)

    # The user agent should *NOT* be used by servers for e.g. protocol
    # detection or feature negotiation: there are other facilities for
    # that.
    #
    # "mercurial/proto-1.0" was the original user agent string and
    # exists for backwards compatibility reasons.
    #
    # The "(Mercurial %s)" string contains the distribution name and
    # version. Other client implementations should choose their own
    # distribution name. Since servers should not be using the user
    # agent string for anything, clients should be able to define
    # whatever user agent they deem appropriate.
    agent = 'mercurial/proto-1.0 (Mercurial %s)' % util.version()
    urlopener.addheaders = [('User-agent', agent),
                            ('Accept', 'application/mercurial-0.1')]
    return urlopener
469
471
def open(ui, url_, data=None):
    """Open *url_* (a URL or a local filesystem path) via our opener."""
    u = util.url(url_)
    if not u.scheme:
        # bare filesystem path: turn it into a file:// URL, no auth
        path = util.normpath(os.path.abspath(url_))
        url_ = 'file://' + urlreq.pathname2url(path)
        authinfo = None
    else:
        u.scheme = u.scheme.lower()
        url_, authinfo = u.authinfo()
    return opener(ui, authinfo).open(url_, data)
General Comments 0
You need to be logged in to leave comments. Login now