py3: replace os.getenv with pycompat.osgetenv...
Pulkit Goyal
r30664:69acfd2c default
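
The pattern applied throughout this change: `os.getenv` returns `str` on Python 3, while Mercurial works in bytes, so each call site switches to the bytes-preserving wrapper in `mercurial/pycompat.py`. A minimal sketch of what such a wrapper has to do, assuming a POSIX Python 3 where `os.environb` exists (illustrative only; the real pycompat code may differ):

```python
import os
import sys

def osgetenv(key, default=None):
    # Sketch of a bytes-in/bytes-out getenv. On Python 3, os.getenv
    # traffics in str, so consult os.environb instead (POSIX only;
    # Windows would need an explicit encode/decode step).
    if sys.version_info[0] >= 3:
        return os.environb.get(key, default)
    # Python 2's os.getenv already returns bytes.
    return os.getenv(key, default)
```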
hgext/largefiles/lfutil.py
@@ -1,664 +1,665 @@
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import hashlib
14 14 import os
15 15 import platform
16 16 import stat
17 17
18 18 from mercurial.i18n import _
19 19
20 20 from mercurial import (
21 21 dirstate,
22 22 error,
23 23 httpconnection,
24 24 match as matchmod,
25 25 node,
26 26 pycompat,
27 27 scmutil,
28 28 util,
29 29 )
30 30
31 31 shortname = '.hglf'
32 32 shortnameslash = shortname + '/'
33 33 longname = 'largefiles'
34 34
35 35 # -- Private worker functions ------------------------------------------
36 36
37 37 def getminsize(ui, assumelfiles, opt, default=10):
38 38 lfsize = opt
39 39 if not lfsize and assumelfiles:
40 40 lfsize = ui.config(longname, 'minsize', default=default)
41 41 if lfsize:
42 42 try:
43 43 lfsize = float(lfsize)
44 44 except ValueError:
45 45 raise error.Abort(_('largefiles: size must be number (not %s)\n')
46 46 % lfsize)
47 47 if lfsize is None:
48 48 raise error.Abort(_('minimum size for largefiles must be specified'))
49 49 return lfsize
50 50
51 51 def link(src, dest):
52 52 """Try to create hardlink - if that fails, efficiently make a copy."""
53 53 util.makedirs(os.path.dirname(dest))
54 54 try:
55 55 util.oslink(src, dest)
56 56 except OSError:
57 57 # if hardlinks fail, fallback on atomic copy
58 58 with open(src, 'rb') as srcf:
59 59 with util.atomictempfile(dest) as dstf:
60 60 for chunk in util.filechunkiter(srcf):
61 61 dstf.write(chunk)
62 62 os.chmod(dest, os.stat(src).st_mode)
63 63
64 64 def usercachepath(ui, hash):
65 65 '''Return the correct location in the "global" largefiles cache for a file
66 66 with the given hash.
67 67 This cache is used for sharing of largefiles across repositories - both
68 68 to preserve download bandwidth and storage space.'''
69 69 return os.path.join(_usercachedir(ui), hash)
70 70
71 71 def _usercachedir(ui):
72 72 '''Return the location of the "global" largefiles cache.'''
73 73 path = ui.configpath(longname, 'usercache', None)
74 74 if path:
75 75 return path
76 76 if pycompat.osname == 'nt':
77 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
77 appdata = pycompat.osgetenv('LOCALAPPDATA',
78 pycompat.osgetenv('APPDATA'))
78 79 if appdata:
79 80 return os.path.join(appdata, longname)
80 81 elif platform.system() == 'Darwin':
81 home = os.getenv('HOME')
82 home = pycompat.osgetenv('HOME')
82 83 if home:
83 84 return os.path.join(home, 'Library', 'Caches', longname)
84 85 elif pycompat.osname == 'posix':
85 path = os.getenv('XDG_CACHE_HOME')
86 path = pycompat.osgetenv('XDG_CACHE_HOME')
86 87 if path:
87 88 return os.path.join(path, longname)
88 home = os.getenv('HOME')
89 home = pycompat.osgetenv('HOME')
89 90 if home:
90 91 return os.path.join(home, '.cache', longname)
91 92 else:
92 93 raise error.Abort(_('unknown operating system: %s\n')
93 94 % pycompat.osname)
94 95 raise error.Abort(_('unknown %s usercache location') % longname)
95 96
96 97 def inusercache(ui, hash):
97 98 path = usercachepath(ui, hash)
98 99 return os.path.exists(path)
99 100
100 101 def findfile(repo, hash):
101 102 '''Return store path of the largefile with the specified hash.
102 103 As a side effect, the file might be linked from user cache.
103 104 Return None if the file can't be found locally.'''
104 105 path, exists = findstorepath(repo, hash)
105 106 if exists:
106 107 repo.ui.note(_('found %s in store\n') % hash)
107 108 return path
108 109 elif inusercache(repo.ui, hash):
109 110 repo.ui.note(_('found %s in system cache\n') % hash)
110 111 path = storepath(repo, hash)
111 112 link(usercachepath(repo.ui, hash), path)
112 113 return path
113 114 return None
114 115
115 116 class largefilesdirstate(dirstate.dirstate):
116 117 def __getitem__(self, key):
117 118 return super(largefilesdirstate, self).__getitem__(unixpath(key))
118 119 def normal(self, f):
119 120 return super(largefilesdirstate, self).normal(unixpath(f))
120 121 def remove(self, f):
121 122 return super(largefilesdirstate, self).remove(unixpath(f))
122 123 def add(self, f):
123 124 return super(largefilesdirstate, self).add(unixpath(f))
124 125 def drop(self, f):
125 126 return super(largefilesdirstate, self).drop(unixpath(f))
126 127 def forget(self, f):
127 128 return super(largefilesdirstate, self).forget(unixpath(f))
128 129 def normallookup(self, f):
129 130 return super(largefilesdirstate, self).normallookup(unixpath(f))
130 131 def _ignore(self, f):
131 132 return False
132 133 def write(self, tr=False):
133 134 # (1) disable PENDING mode always
134 135 # (lfdirstate isn't yet managed as a part of the transaction)
135 136 # (2) avoid develwarn 'use dirstate.write with ....'
136 137 super(largefilesdirstate, self).write(None)
137 138
138 139 def openlfdirstate(ui, repo, create=True):
139 140 '''
140 141 Return a dirstate object that tracks largefiles: i.e. its root is
141 142 the repo root, but it is saved in .hg/largefiles/dirstate.
142 143 '''
143 144 vfs = repo.vfs
144 145 lfstoredir = longname
145 146 opener = scmutil.opener(vfs.join(lfstoredir))
146 147 lfdirstate = largefilesdirstate(opener, ui, repo.root,
147 148 repo.dirstate._validate)
148 149
149 150 # If the largefiles dirstate does not exist, populate and create
150 151 # it. This ensures that we create it on the first meaningful
151 152 # largefiles operation in a new clone.
152 153 if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
153 154 matcher = getstandinmatcher(repo)
154 155 standins = repo.dirstate.walk(matcher, [], False, False)
155 156
156 157 if len(standins) > 0:
157 158 vfs.makedirs(lfstoredir)
158 159
159 160 for standin in standins:
160 161 lfile = splitstandin(standin)
161 162 lfdirstate.normallookup(lfile)
162 163 return lfdirstate
163 164
164 165 def lfdirstatestatus(lfdirstate, repo):
165 166 wctx = repo['.']
166 167 match = matchmod.always(repo.root, repo.getcwd())
167 168 unsure, s = lfdirstate.status(match, [], False, False, False)
168 169 modified, clean = s.modified, s.clean
169 170 for lfile in unsure:
170 171 try:
171 172 fctx = wctx[standin(lfile)]
172 173 except LookupError:
173 174 fctx = None
174 175 if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
175 176 modified.append(lfile)
176 177 else:
177 178 clean.append(lfile)
178 179 lfdirstate.normal(lfile)
179 180 return s
180 181
181 182 def listlfiles(repo, rev=None, matcher=None):
182 183 '''return a list of largefiles in the working copy or the
183 184 specified changeset'''
184 185
185 186 if matcher is None:
186 187 matcher = getstandinmatcher(repo)
187 188
188 189 # ignore unknown files in working directory
189 190 return [splitstandin(f)
190 191 for f in repo[rev].walk(matcher)
191 192 if rev is not None or repo.dirstate[f] != '?']
192 193
193 194 def instore(repo, hash, forcelocal=False):
194 195 '''Return true if a largefile with the given hash exists in the store'''
195 196 return os.path.exists(storepath(repo, hash, forcelocal))
196 197
197 198 def storepath(repo, hash, forcelocal=False):
198 199 '''Return the correct location in the repository largefiles store for a
199 200 file with the given hash.'''
200 201 if not forcelocal and repo.shared():
201 202 return repo.vfs.reljoin(repo.sharedpath, longname, hash)
202 203 return repo.join(longname, hash)
203 204
204 205 def findstorepath(repo, hash):
205 206 '''Search through the local store path(s) to find the file for the given
206 207 hash. If the file is not found, its path in the primary store is returned.
207 208 The return value is a tuple of (path, exists(path)).
208 209 '''
209 210 # For shared repos, the primary store is in the share source. But for
210 211 # backward compatibility, force a lookup in the local store if it wasn't
211 212 # found in the share source.
212 213 path = storepath(repo, hash, False)
213 214
214 215 if instore(repo, hash):
215 216 return (path, True)
216 217 elif repo.shared() and instore(repo, hash, True):
217 218 return storepath(repo, hash, True), True
218 219
219 220 return (path, False)
220 221
221 222 def copyfromcache(repo, hash, filename):
222 223 '''Copy the specified largefile from the repo or system cache to
223 224 filename in the repository. Return true on success or false if the
224 225 file was not found in either cache (which should not happen:
225 226 this is meant to be called only after ensuring that the needed
226 227 largefile exists in the cache).'''
227 228 wvfs = repo.wvfs
228 229 path = findfile(repo, hash)
229 230 if path is None:
230 231 return False
231 232 wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
232 233 # The write may fail before the file is fully written, but we
233 234 # don't use atomic writes in the working copy.
234 235 with open(path, 'rb') as srcfd:
235 236 with wvfs(filename, 'wb') as destfd:
236 237 gothash = copyandhash(
237 238 util.filechunkiter(srcfd), destfd)
238 239 if gothash != hash:
239 240 repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
240 241 % (filename, path, gothash))
241 242 wvfs.unlink(filename)
242 243 return False
243 244 return True
244 245
245 246 def copytostore(repo, rev, file, uploaded=False):
246 247 wvfs = repo.wvfs
247 248 hash = readstandin(repo, file, rev)
248 249 if instore(repo, hash):
249 250 return
250 251 if wvfs.exists(file):
251 252 copytostoreabsolute(repo, wvfs.join(file), hash)
252 253 else:
253 254 repo.ui.warn(_("%s: largefile %s not available from local store\n") %
254 255 (file, hash))
255 256
256 257 def copyalltostore(repo, node):
257 258 '''Copy all largefiles in a given revision to the store'''
258 259
259 260 ctx = repo[node]
260 261 for filename in ctx.files():
261 262 if isstandin(filename) and filename in ctx.manifest():
262 263 realfile = splitstandin(filename)
263 264 copytostore(repo, ctx.node(), realfile)
264 265
265 266 def copytostoreabsolute(repo, file, hash):
266 267 if inusercache(repo.ui, hash):
267 268 link(usercachepath(repo.ui, hash), storepath(repo, hash))
268 269 else:
269 270 util.makedirs(os.path.dirname(storepath(repo, hash)))
270 271 with open(file, 'rb') as srcf:
271 272 with util.atomictempfile(storepath(repo, hash),
272 273 createmode=repo.store.createmode) as dstf:
273 274 for chunk in util.filechunkiter(srcf):
274 275 dstf.write(chunk)
275 276 linktousercache(repo, hash)
276 277
277 278 def linktousercache(repo, hash):
278 279 '''Link / copy the largefile with the specified hash from the store
279 280 to the cache.'''
280 281 path = usercachepath(repo.ui, hash)
281 282 link(storepath(repo, hash), path)
282 283
283 284 def getstandinmatcher(repo, rmatcher=None):
284 285 '''Return a match object that applies rmatcher to the standin directory'''
285 286 wvfs = repo.wvfs
286 287 standindir = shortname
287 288
288 289 # no warnings about missing files or directories
289 290 badfn = lambda f, msg: None
290 291
291 292 if rmatcher and not rmatcher.always():
292 293 pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
293 294 if not pats:
294 295 pats = [wvfs.join(standindir)]
295 296 match = scmutil.match(repo[None], pats, badfn=badfn)
296 297 # if pats is empty, it would incorrectly always match, so clear _always
297 298 match._always = False
298 299 else:
299 300 # no patterns: relative to repo root
300 301 match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
301 302 return match
302 303
303 304 def composestandinmatcher(repo, rmatcher):
304 305 '''Return a matcher that accepts standins corresponding to the
305 306 files accepted by rmatcher. Pass the list of files in the matcher
306 307 as the paths specified by the user.'''
307 308 smatcher = getstandinmatcher(repo, rmatcher)
308 309 isstandin = smatcher.matchfn
309 310 def composedmatchfn(f):
310 311 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
311 312 smatcher.matchfn = composedmatchfn
312 313
313 314 return smatcher
314 315
315 316 def standin(filename):
316 317 '''Return the repo-relative path to the standin for the specified big
317 318 file.'''
318 319 # Notes:
319 320 # 1) Some callers want an absolute path, but for instance addlargefiles
320 321 # needs it repo-relative so it can be passed to repo[None].add(). So
321 322 # leave it up to the caller to use repo.wjoin() to get an absolute path.
322 323 # 2) Join with '/' because that's what dirstate always uses, even on
323 324 # Windows. Change existing separator to '/' first in case we are
324 325 # passed filenames from an external source (like the command line).
325 326 return shortnameslash + util.pconvert(filename)
326 327
327 328 def isstandin(filename):
328 329 '''Return true if filename is a big file standin. filename must be
329 330 in Mercurial's internal form (slash-separated).'''
330 331 return filename.startswith(shortnameslash)
331 332
332 333 def splitstandin(filename):
333 334 # Split on / because that's what dirstate always uses, even on Windows.
334 335 # Change local separator to / first just in case we are passed filenames
335 336 # from an external source (like the command line).
336 337 bits = util.pconvert(filename).split('/', 1)
337 338 if len(bits) == 2 and bits[0] == shortname:
338 339 return bits[1]
339 340 else:
340 341 return None
341 342
342 343 def updatestandin(repo, standin):
343 344 file = repo.wjoin(splitstandin(standin))
344 345 if repo.wvfs.exists(splitstandin(standin)):
345 346 hash = hashfile(file)
346 347 executable = getexecutable(file)
347 348 writestandin(repo, standin, hash, executable)
348 349 else:
349 350 raise error.Abort(_('%s: file not found!') % splitstandin(standin))
350 351
351 352 def readstandin(repo, filename, node=None):
352 353 '''read hex hash from standin for filename at given node, or working
353 354 directory if no node is given'''
354 355 return repo[node][standin(filename)].data().strip()
355 356
356 357 def writestandin(repo, standin, hash, executable):
357 358 '''write hash to <repo.root>/<standin>'''
358 359 repo.wwrite(standin, hash + '\n', executable and 'x' or '')
359 360
360 361 def copyandhash(instream, outfile):
361 362 '''Read bytes from instream (iterable) and write them to outfile,
362 363 computing the SHA-1 hash of the data along the way. Return the hash.'''
363 364 hasher = hashlib.sha1('')
364 365 for data in instream:
365 366 hasher.update(data)
366 367 outfile.write(data)
367 368 return hasher.hexdigest()
368 369
369 370 def hashrepofile(repo, file):
370 371 return hashfile(repo.wjoin(file))
371 372
372 373 def hashfile(file):
373 374 if not os.path.exists(file):
374 375 return ''
375 376 hasher = hashlib.sha1('')
376 377 with open(file, 'rb') as fd:
377 378 for data in util.filechunkiter(fd):
378 379 hasher.update(data)
379 380 return hasher.hexdigest()
380 381
381 382 def getexecutable(filename):
382 383 mode = os.stat(filename).st_mode
383 384 return ((mode & stat.S_IXUSR) and
384 385 (mode & stat.S_IXGRP) and
385 386 (mode & stat.S_IXOTH))
386 387
387 388 def urljoin(first, second, *arg):
388 389 def join(left, right):
389 390 if not left.endswith('/'):
390 391 left += '/'
391 392 if right.startswith('/'):
392 393 right = right[1:]
393 394 return left + right
394 395
395 396 url = join(first, second)
396 397 for a in arg:
397 398 url = join(url, a)
398 399 return url
399 400
400 401 def hexsha1(data):
401 402 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
402 403 object data"""
403 404 h = hashlib.sha1()
404 405 for chunk in util.filechunkiter(data):
405 406 h.update(chunk)
406 407 return h.hexdigest()
407 408
408 409 def httpsendfile(ui, filename):
409 410 return httpconnection.httpsendfile(ui, filename, 'rb')
410 411
411 412 def unixpath(path):
412 413 '''Return a version of path normalized for use with the lfdirstate.'''
413 414 return util.pconvert(os.path.normpath(path))
414 415
415 416 def islfilesrepo(repo):
416 417 '''Return true if the repo is a largefile repo.'''
417 418 if ('largefiles' in repo.requirements and
418 419 any(shortnameslash in f[0] for f in repo.store.datafiles())):
419 420 return True
420 421
421 422 return any(openlfdirstate(repo.ui, repo, False))
422 423
423 424 class storeprotonotcapable(Exception):
424 425 def __init__(self, storetypes):
425 426 self.storetypes = storetypes
426 427
427 428 def getstandinsstate(repo):
428 429 standins = []
429 430 matcher = getstandinmatcher(repo)
430 431 for standin in repo.dirstate.walk(matcher, [], False, False):
431 432 lfile = splitstandin(standin)
432 433 try:
433 434 hash = readstandin(repo, lfile)
434 435 except IOError:
435 436 hash = None
436 437 standins.append((lfile, hash))
437 438 return standins
438 439
439 440 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
440 441 lfstandin = standin(lfile)
441 442 if lfstandin in repo.dirstate:
442 443 stat = repo.dirstate._map[lfstandin]
443 444 state, mtime = stat[0], stat[3]
444 445 else:
445 446 state, mtime = '?', -1
446 447 if state == 'n':
447 448 if (normallookup or mtime < 0 or
448 449 not repo.wvfs.exists(lfile)):
449 450 # state 'n' doesn't ensure 'clean' in this case
450 451 lfdirstate.normallookup(lfile)
451 452 else:
452 453 lfdirstate.normal(lfile)
453 454 elif state == 'm':
454 455 lfdirstate.normallookup(lfile)
455 456 elif state == 'r':
456 457 lfdirstate.remove(lfile)
457 458 elif state == 'a':
458 459 lfdirstate.add(lfile)
459 460 elif state == '?':
460 461 lfdirstate.drop(lfile)
461 462
462 463 def markcommitted(orig, ctx, node):
463 464 repo = ctx.repo()
464 465
465 466 orig(node)
466 467
467 468 # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
468 469 # because files coming from the 2nd parent are omitted in the latter.
469 470 #
470 471 # The former should be used to get targets of "synclfdirstate",
471 472 # because such files:
472 473 # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
473 474 # - have to be marked as "n" after commit, but
474 475 # - aren't listed in "repo[node].files()"
475 476
476 477 lfdirstate = openlfdirstate(repo.ui, repo)
477 478 for f in ctx.files():
478 479 if isstandin(f):
479 480 lfile = splitstandin(f)
480 481 synclfdirstate(repo, lfdirstate, lfile, False)
481 482 lfdirstate.write()
482 483
483 484 # As part of committing, copy all of the largefiles into the cache.
484 485 copyalltostore(repo, node)
485 486
486 487 def getlfilestoupdate(oldstandins, newstandins):
487 488 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
488 489 filelist = []
489 490 for f in changedstandins:
490 491 if f[0] not in filelist:
491 492 filelist.append(f[0])
492 493 return filelist
493 494
494 495 def getlfilestoupload(repo, missing, addfunc):
495 496 for i, n in enumerate(missing):
496 497 repo.ui.progress(_('finding outgoing largefiles'), i,
497 498 unit=_('revisions'), total=len(missing))
498 499 parents = [p for p in repo[n].parents() if p != node.nullid]
499 500
500 501 oldlfstatus = repo.lfstatus
501 502 repo.lfstatus = False
502 503 try:
503 504 ctx = repo[n]
504 505 finally:
505 506 repo.lfstatus = oldlfstatus
506 507
507 508 files = set(ctx.files())
508 509 if len(parents) == 2:
509 510 mc = ctx.manifest()
510 511 mp1 = ctx.parents()[0].manifest()
511 512 mp2 = ctx.parents()[1].manifest()
512 513 for f in mp1:
513 514 if f not in mc:
514 515 files.add(f)
515 516 for f in mp2:
516 517 if f not in mc:
517 518 files.add(f)
518 519 for f in mc:
519 520 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
520 521 files.add(f)
521 522 for fn in files:
522 523 if isstandin(fn) and fn in ctx:
523 524 addfunc(fn, ctx[fn].data().strip())
524 525 repo.ui.progress(_('finding outgoing largefiles'), None)
525 526
526 527 def updatestandinsbymatch(repo, match):
527 528 '''Update standins in the working directory according to specified match
528 529
529 530 This returns (possibly modified) ``match`` object to be used for
530 531 subsequent commit process.
531 532 '''
532 533
533 534 ui = repo.ui
534 535
535 536 # Case 1: user calls commit with no specific files or
536 537 # include/exclude patterns: refresh and commit all files that
537 538 # are "dirty".
538 539 if match is None or match.always():
539 540 # Spend a bit of time here to get a list of files we know
540 541 # are modified so we can compare only against those.
541 542 # It can cost a lot of time (several seconds)
542 543 # otherwise to update all standins if the largefiles are
543 544 # large.
544 545 lfdirstate = openlfdirstate(ui, repo)
545 546 dirtymatch = matchmod.always(repo.root, repo.getcwd())
546 547 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
547 548 False)
548 549 modifiedfiles = unsure + s.modified + s.added + s.removed
549 550 lfiles = listlfiles(repo)
550 551 # this only loops through largefiles that exist (not
551 552 # removed/renamed)
552 553 for lfile in lfiles:
553 554 if lfile in modifiedfiles:
554 555 if repo.wvfs.exists(standin(lfile)):
555 556 # this handles the case where a rebase is being
556 557 # performed and the working copy is not updated
557 558 # yet.
558 559 if repo.wvfs.exists(lfile):
559 560 updatestandin(repo,
560 561 standin(lfile))
561 562
562 563 return match
563 564
564 565 lfiles = listlfiles(repo)
565 566 match._files = repo._subdirlfs(match.files(), lfiles)
566 567
567 568 # Case 2: user calls commit with specified patterns: refresh
568 569 # any matching big files.
569 570 smatcher = composestandinmatcher(repo, match)
570 571 standins = repo.dirstate.walk(smatcher, [], False, False)
571 572
572 573 # No matching big files: get out of the way and pass control to
573 574 # the usual commit() method.
574 575 if not standins:
575 576 return match
576 577
577 578 # Refresh all matching big files. It's possible that the
578 579 # commit will end up failing, in which case the big files will
579 580 # stay refreshed. No harm done: the user modified them and
580 581 # asked to commit them, so sooner or later we're going to
581 582 # refresh the standins. Might as well leave them refreshed.
582 583 lfdirstate = openlfdirstate(ui, repo)
583 584 for fstandin in standins:
584 585 lfile = splitstandin(fstandin)
585 586 if lfdirstate[lfile] != 'r':
586 587 updatestandin(repo, fstandin)
587 588
588 589 # Cook up a new matcher that only matches regular files or
589 590 # standins corresponding to the big files requested by the
590 591 # user. Have to modify _files to prevent commit() from
591 592 # complaining "not tracked" for big files.
592 593 match = copy.copy(match)
593 594 origmatchfn = match.matchfn
594 595
595 596 # Check both the list of largefiles and the list of
596 597 # standins because if a largefile was removed, it
597 598 # won't be in the list of largefiles at this point
598 599 match._files += sorted(standins)
599 600
600 601 actualfiles = []
601 602 for f in match._files:
602 603 fstandin = standin(f)
603 604
604 605 # For largefiles, only one of the normal and standin should be
605 606 # committed (except if one of them is a remove). In the case of a
606 607 # standin removal, drop the normal file if it is unknown to dirstate.
607 608 # Thus, skip plain largefile names but keep the standin.
608 609 if f in lfiles or fstandin in standins:
609 610 if repo.dirstate[fstandin] != 'r':
610 611 if repo.dirstate[f] != 'r':
611 612 continue
612 613 elif repo.dirstate[f] == '?':
613 614 continue
614 615
615 616 actualfiles.append(f)
616 617 match._files = actualfiles
617 618
618 619 def matchfn(f):
619 620 if origmatchfn(f):
620 621 return f not in lfiles
621 622 else:
622 623 return f in standins
623 624
624 625 match.matchfn = matchfn
625 626
626 627 return match
627 628
628 629 class automatedcommithook(object):
629 630 '''Stateful hook to update standins at the 1st commit of resuming
630 631
631 632 For efficiency, updating standins in the working directory should
632 633 be avoided while automated committing (like rebase, transplant and
633 634 so on), because they should be updated before committing.
634 635
635 636 But the 1st commit of resuming automated committing (e.g. ``rebase
636 637 --continue``) should update them, because largefiles may be
637 638 modified manually.
638 639 '''
639 640 def __init__(self, resuming):
640 641 self.resuming = resuming
641 642
642 643 def __call__(self, repo, match):
643 644 if self.resuming:
644 645 self.resuming = False # avoids updating at subsequent commits
645 646 return updatestandinsbymatch(repo, match)
646 647 else:
647 648 return match
648 649
649 650 def getstatuswriter(ui, repo, forcibly=None):
650 651 '''Return the function to write largefiles specific status out
651 652
652 653 If ``forcibly`` is ``None``, this returns the last element of
653 654 ``repo._lfstatuswriters`` as "default" writer function.
654 655
655 656 Otherwise, this returns the function to always write out (or
656 657 ignore if ``not forcibly``) status.
657 658 '''
658 659 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
659 660 return repo._lfstatuswriters[-1]
660 661 else:
661 662 if forcibly:
662 663 return ui.status # forcibly WRITE OUT
663 664 else:
664 665 return lambda *msg, **opts: None # forcibly IGNORE
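
Aside from the getenv swap, `_usercachedir` is untouched; as a reading aid, a condensed restatement of its platform fallback order (illustrative Python, not part of the patch):

```python
def usercache_candidates(osname, system):
    # Condensed restatement of _usercachedir's lookup order above.
    if osname == 'nt':
        return ['LOCALAPPDATA', 'APPDATA']   # <dir>/largefiles
    if system == 'Darwin':
        return ['HOME']                      # ~/Library/Caches/largefiles
    if osname == 'posix':
        return ['XDG_CACHE_HOME', 'HOME']    # <dir>/largefiles or ~/.cache/largefiles
    raise ValueError('unknown operating system: %s' % osname)
```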
mercurial/profiling.py
@@ -1,176 +1,176 @@
1 1 # profiling.py - profiling functions
2 2 #
3 3 # Copyright 2016 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import, print_function
9 9
10 10 import contextlib
11 import os
12 11 import time
13 12
14 13 from .i18n import _
15 14 from . import (
16 15 error,
16 pycompat,
17 17 util,
18 18 )
19 19
20 20 @contextlib.contextmanager
21 21 def lsprofile(ui, fp):
22 22 format = ui.config('profiling', 'format', default='text')
23 23 field = ui.config('profiling', 'sort', default='inlinetime')
24 24 limit = ui.configint('profiling', 'limit', default=30)
25 25 climit = ui.configint('profiling', 'nested', default=0)
26 26
27 27 if format not in ['text', 'kcachegrind']:
28 28 ui.warn(_("unrecognized profiling format '%s'"
29 29 " - Ignored\n") % format)
30 30 format = 'text'
31 31
32 32 try:
33 33 from . import lsprof
34 34 except ImportError:
35 35 raise error.Abort(_(
36 36 'lsprof not available - install from '
37 37 'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'))
38 38 p = lsprof.Profiler()
39 39 p.enable(subcalls=True)
40 40 try:
41 41 yield
42 42 finally:
43 43 p.disable()
44 44
45 45 if format == 'kcachegrind':
46 46 from . import lsprofcalltree
47 47 calltree = lsprofcalltree.KCacheGrind(p)
48 48 calltree.output(fp)
49 49 else:
50 50 # format == 'text'
51 51 stats = lsprof.Stats(p.getstats())
52 52 stats.sort(field)
53 53 stats.pprint(limit=limit, file=fp, climit=climit)
54 54
55 55 @contextlib.contextmanager
56 56 def flameprofile(ui, fp):
57 57 try:
58 58 from flamegraph import flamegraph
59 59 except ImportError:
60 60 raise error.Abort(_(
61 61 'flamegraph not available - install from '
62 62 'https://github.com/evanhempel/python-flamegraph'))
63 63 # developer config: profiling.freq
64 64 freq = ui.configint('profiling', 'freq', default=1000)
65 65 filter_ = None
66 66 collapse_recursion = True
67 67 thread = flamegraph.ProfileThread(fp, 1.0 / freq,
68 68 filter_, collapse_recursion)
69 69 start_time = time.clock()
70 70 try:
71 71 thread.start()
72 72 yield
73 73 finally:
74 74 thread.stop()
75 75 thread.join()
76 76 print('Collected %d stack frames (%d unique) in %2.2f seconds.' % (
77 77 time.clock() - start_time, thread.num_frames(),
78 78 thread.num_frames(unique=True)))
79 79
80 80 @contextlib.contextmanager
81 81 def statprofile(ui, fp):
82 82 from . import statprof
83 83
84 84 freq = ui.configint('profiling', 'freq', default=1000)
85 85 if freq > 0:
86 86 # Cannot reset when profiler is already active. So silently no-op.
87 87 if statprof.state.profile_level == 0:
88 88 statprof.reset(freq)
89 89 else:
90 90 ui.warn(_("invalid sampling frequency '%s' - ignoring\n") % freq)
91 91
92 92 statprof.start(mechanism='thread')
93 93
94 94 try:
95 95 yield
96 96 finally:
97 97 data = statprof.stop()
98 98
99 99 profformat = ui.config('profiling', 'statformat', 'hotpath')
100 100
101 101 formats = {
102 102 'byline': statprof.DisplayFormats.ByLine,
103 103 'bymethod': statprof.DisplayFormats.ByMethod,
104 104 'hotpath': statprof.DisplayFormats.Hotpath,
105 105 'json': statprof.DisplayFormats.Json,
106 106 }
107 107
108 108 if profformat in formats:
109 109 displayformat = formats[profformat]
110 110 else:
111 111 ui.warn(_('unknown profiler output format: %s\n') % profformat)
112 112 displayformat = statprof.DisplayFormats.Hotpath
113 113
114 114 statprof.display(fp, data=data, format=displayformat)
115 115
116 116 @contextlib.contextmanager
117 117 def profile(ui):
118 118 """Start profiling.
119 119
120 120 Profiling is active when the context manager is active. When the context
121 121 manager exits, profiling results will be written to the configured output.
122 122 """
123 profiler = os.getenv('HGPROF')
123 profiler = pycompat.osgetenv('HGPROF')
124 124 if profiler is None:
125 125 profiler = ui.config('profiling', 'type', default='stat')
126 126 if profiler not in ('ls', 'stat', 'flame'):
127 127 ui.warn(_("unrecognized profiler '%s' - ignored\n") % profiler)
128 128 profiler = 'stat'
129 129
130 130 output = ui.config('profiling', 'output')
131 131
132 132 if output == 'blackbox':
133 133 fp = util.stringio()
134 134 elif output:
135 135 path = ui.expandpath(output)
136 136 fp = open(path, 'wb')
137 137 else:
138 138 fp = ui.ferr
139 139
140 140 try:
141 141 if profiler == 'ls':
142 142 proffn = lsprofile
143 143 elif profiler == 'flame':
144 144 proffn = flameprofile
145 145 else:
146 146 proffn = statprofile
147 147
148 148 with proffn(ui, fp):
149 149 yield
150 150
151 151 finally:
152 152 if output:
153 153 if output == 'blackbox':
154 154 val = 'Profile:\n%s' % fp.getvalue()
155 155 # ui.log treats the input as a format string,
156 156 # so we need to escape any % signs.
157 157 val = val.replace('%', '%%')
158 158 ui.log('profile', val)
159 159 fp.close()
160 160
161 161 @contextlib.contextmanager
162 162 def maybeprofile(ui):
163 163 """Profile if enabled, else do nothing.
164 164
165 165 This context manager can be used to optionally profile if profiling
166 166 is enabled. Otherwise, it does nothing.
167 167
168 168 The purpose of this context manager is to make calling code simpler:
169 169 just use a single code path for calling into code you may want to profile
170 170 and this function determines whether to start profiling.
171 171 """
172 172 if ui.configbool('profiling', 'enabled'):
173 173 with profile(ui):
174 174 yield
175 175 else:
176 176 yield
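
In this file the swap affects how the profiler backend is chosen; restating `profile()`'s selection order in isolation (illustrative only, still using plain `os.getenv` for brevity where the patch uses `pycompat.osgetenv`):

```python
import os

def chooseprofiler(ui):
    # HGPROF wins over the profiling.type config; unrecognized
    # names fall back to the statistical profiler.
    profiler = os.getenv('HGPROF')
    if profiler is None:
        profiler = ui.config('profiling', 'type', default='stat')
    if profiler not in ('ls', 'stat', 'flame'):
        profiler = 'stat'
    return profiler
```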
mercurial/url.py
@@ -1,479 +1,481 @@
1 1 # url.py - HTTP handling for mercurial
2 2 #
3 3 # Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 from __future__ import absolute_import
11 11
12 12 import base64
13 13 import os
14 14 import socket
15 15
16 16 from .i18n import _
17 17 from . import (
18 18 error,
19 19 httpconnection as httpconnectionmod,
20 20 keepalive,
21 pycompat,
21 22 sslutil,
22 23 util,
23 24 )
24 25
25 26 httplib = util.httplib
26 27 stringio = util.stringio
27 28 urlerr = util.urlerr
28 29 urlreq = util.urlreq
29 30
30 31 class passwordmgr(object):
31 32 def __init__(self, ui, passwddb):
32 33 self.ui = ui
33 34 self.passwddb = passwddb
34 35
35 36 def add_password(self, realm, uri, user, passwd):
36 37 return self.passwddb.add_password(realm, uri, user, passwd)
37 38
38 39 def find_user_password(self, realm, authuri):
39 40 authinfo = self.passwddb.find_user_password(realm, authuri)
40 41 user, passwd = authinfo
41 42 if user and passwd:
42 43 self._writedebug(user, passwd)
43 44 return (user, passwd)
44 45
45 46 if not user or not passwd:
46 47 res = httpconnectionmod.readauthforuri(self.ui, authuri, user)
47 48 if res:
48 49 group, auth = res
49 50 user, passwd = auth.get('username'), auth.get('password')
50 51 self.ui.debug("using auth.%s.* for authentication\n" % group)
51 52 if not user or not passwd:
52 53 u = util.url(authuri)
53 54 u.query = None
54 55 if not self.ui.interactive():
55 56 raise error.Abort(_('http authorization required for %s') %
56 57 util.hidepassword(str(u)))
57 58
58 59 self.ui.write(_("http authorization required for %s\n") %
59 60 util.hidepassword(str(u)))
60 61 self.ui.write(_("realm: %s\n") % realm)
61 62 if user:
62 63 self.ui.write(_("user: %s\n") % user)
63 64 else:
64 65 user = self.ui.prompt(_("user:"), default=None)
65 66
66 67 if not passwd:
67 68 passwd = self.ui.getpass()
68 69
69 70 self.passwddb.add_password(realm, authuri, user, passwd)
70 71 self._writedebug(user, passwd)
71 72 return (user, passwd)
72 73
73 74 def _writedebug(self, user, passwd):
74 75 msg = _('http auth: user %s, password %s\n')
75 76 self.ui.debug(msg % (user, passwd and '*' * len(passwd) or 'not set'))
76 77
77 78 def find_stored_password(self, authuri):
78 79 return self.passwddb.find_user_password(None, authuri)
79 80
80 81 class proxyhandler(urlreq.proxyhandler):
81 82 def __init__(self, ui):
82 proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
83 proxyurl = (ui.config("http_proxy", "host") or
84 pycompat.osgetenv('http_proxy'))
83 85 # XXX proxyauthinfo = None
84 86
85 87 if proxyurl:
86 88 # proxy can be proper url or host[:port]
87 89 if not (proxyurl.startswith('http:') or
88 90 proxyurl.startswith('https:')):
89 91 proxyurl = 'http://' + proxyurl + '/'
90 92 proxy = util.url(proxyurl)
91 93 if not proxy.user:
92 94 proxy.user = ui.config("http_proxy", "user")
93 95 proxy.passwd = ui.config("http_proxy", "passwd")
94 96
95 97 # see if we should use a proxy for this url
96 98 no_list = ["localhost", "127.0.0.1"]
97 99 no_list.extend([p.lower() for
98 100 p in ui.configlist("http_proxy", "no")])
99 101 no_list.extend([p.strip().lower() for
100 p in os.getenv("no_proxy", '').split(',')
102 p in pycompat.osgetenv("no_proxy", '').split(',')
101 103 if p.strip()])
102 104 # "http_proxy.always" config is for running tests on localhost
103 105 if ui.configbool("http_proxy", "always"):
104 106 self.no_list = []
105 107 else:
106 108 self.no_list = no_list
107 109
108 110 proxyurl = str(proxy)
109 111 proxies = {'http': proxyurl, 'https': proxyurl}
110 112 ui.debug('proxying through http://%s:%s\n' %
111 113 (proxy.host, proxy.port))
112 114 else:
113 115 proxies = {}
114 116
115 117 urlreq.proxyhandler.__init__(self, proxies)
116 118 self.ui = ui
117 119
118 120 def proxy_open(self, req, proxy, type_):
119 121 host = req.get_host().split(':')[0]
120 122 for e in self.no_list:
121 123 if host == e:
122 124 return None
123 125 if e.startswith('*.') and host.endswith(e[2:]):
124 126 return None
125 127 if e.startswith('.') and host.endswith(e[1:]):
126 128 return None
127 129
128 130 return urlreq.proxyhandler.proxy_open(self, req, proxy, type_)
129 131
130 132 def _gen_sendfile(orgsend):
131 133 def _sendfile(self, data):
132 134 # send a file
133 135 if isinstance(data, httpconnectionmod.httpsendfile):
134 136 # if auth required, some data sent twice, so rewind here
135 137 data.seek(0)
136 138 for chunk in util.filechunkiter(data):
137 139 orgsend(self, chunk)
138 140 else:
139 141 orgsend(self, data)
140 142 return _sendfile
141 143
142 144 has_https = util.safehasattr(urlreq, 'httpshandler')
143 145
144 146 class httpconnection(keepalive.HTTPConnection):
145 147 # must be able to send big bundle as stream.
146 148 send = _gen_sendfile(keepalive.HTTPConnection.send)
147 149
148 150 def getresponse(self):
149 151 proxyres = getattr(self, 'proxyres', None)
150 152 if proxyres:
151 153 if proxyres.will_close:
152 154 self.close()
153 155 self.proxyres = None
154 156 return proxyres
155 157 return keepalive.HTTPConnection.getresponse(self)
156 158
157 159 # general transaction handler to support different ways to handle
158 160 # HTTPS proxying before and after Python 2.6.3.
159 161 def _generic_start_transaction(handler, h, req):
160 162 tunnel_host = getattr(req, '_tunnel_host', None)
161 163 if tunnel_host:
162 164 if tunnel_host[:7] not in ['http://', 'https:/']:
163 165 tunnel_host = 'https://' + tunnel_host
164 166 new_tunnel = True
165 167 else:
166 168 tunnel_host = req.get_selector()
167 169 new_tunnel = False
168 170
169 171 if new_tunnel or tunnel_host == req.get_full_url(): # has proxy
170 172 u = util.url(tunnel_host)
171 173 if new_tunnel or u.scheme == 'https': # only use CONNECT for HTTPS
172 174 h.realhostport = ':'.join([u.host, (u.port or '443')])
173 175 h.headers = req.headers.copy()
174 176 h.headers.update(handler.parent.addheaders)
175 177 return
176 178
177 179 h.realhostport = None
178 180 h.headers = None
179 181
180 182 def _generic_proxytunnel(self):
181 183 proxyheaders = dict(
182 184 [(x, self.headers[x]) for x in self.headers
183 185 if x.lower().startswith('proxy-')])
184 186 self.send('CONNECT %s HTTP/1.0\r\n' % self.realhostport)
185 187 for header in proxyheaders.iteritems():
186 188 self.send('%s: %s\r\n' % header)
187 189 self.send('\r\n')
188 190
189 191 # majority of the following code is duplicated from
190 192 # httplib.HTTPConnection as there are no adequate places to
191 193 # override functions to provide the needed functionality
192 194 res = self.response_class(self.sock,
193 195 strict=self.strict,
194 196 method=self._method)
195 197
196 198 while True:
197 199 version, status, reason = res._read_status()
198 200 if status != httplib.CONTINUE:
199 201 break
200 202 # skip lines that are all whitespace
201 203 list(iter(lambda: res.fp.readline().strip(), ''))
202 204 res.status = status
203 205 res.reason = reason.strip()
204 206
205 207 if res.status == 200:
206 208 # skip lines until we find a blank line
207 209 list(iter(res.fp.readline, '\r\n'))
208 210 return True
209 211
210 212 if version == 'HTTP/1.0':
211 213 res.version = 10
212 214 elif version.startswith('HTTP/1.'):
213 215 res.version = 11
214 216 elif version == 'HTTP/0.9':
215 217 res.version = 9
216 218 else:
217 219 raise httplib.UnknownProtocol(version)
218 220
219 221 if res.version == 9:
220 222 res.length = None
221 223 res.chunked = 0
222 224 res.will_close = 1
223 225 res.msg = httplib.HTTPMessage(stringio())
224 226 return False
225 227
226 228 res.msg = httplib.HTTPMessage(res.fp)
227 229 res.msg.fp = None
228 230
229 231 # are we using the chunked-style of transfer encoding?
230 232 trenc = res.msg.getheader('transfer-encoding')
231 233 if trenc and trenc.lower() == "chunked":
232 234 res.chunked = 1
233 235 res.chunk_left = None
234 236 else:
235 237 res.chunked = 0
236 238
237 239 # will the connection close at the end of the response?
238 240 res.will_close = res._check_close()
239 241
240 242 # do we have a Content-Length?
241 243 # NOTE: RFC 2616, section 4.4, #3 says we ignore this if
242 244 # transfer-encoding is "chunked"
243 245 length = res.msg.getheader('content-length')
244 246 if length and not res.chunked:
245 247 try:
246 248 res.length = int(length)
247 249 except ValueError:
248 250 res.length = None
249 251 else:
250 252 if res.length < 0: # ignore nonsensical negative lengths
251 253 res.length = None
252 254 else:
253 255 res.length = None
254 256
255 257 # does the body have a fixed length? (of zero)
256 258 if (status == httplib.NO_CONTENT or status == httplib.NOT_MODIFIED or
257 259 100 <= status < 200 or # 1xx codes
258 260 res._method == 'HEAD'):
259 261 res.length = 0
260 262
261 263 # if the connection remains open, and we aren't using chunked, and
262 264 # a content-length was not provided, then assume that the connection
263 265 # WILL close.
264 266 if (not res.will_close and
265 267 not res.chunked and
266 268 res.length is None):
267 269 res.will_close = 1
268 270
269 271 self.proxyres = res
270 272
271 273 return False
272 274
273 275 class httphandler(keepalive.HTTPHandler):
274 276 def http_open(self, req):
275 277 return self.do_open(httpconnection, req)
276 278
277 279 def _start_transaction(self, h, req):
278 280 _generic_start_transaction(self, h, req)
279 281 return keepalive.HTTPHandler._start_transaction(self, h, req)
280 282
281 283 if has_https:
282 284 class httpsconnection(httplib.HTTPConnection):
283 285 response_class = keepalive.HTTPResponse
284 286 default_port = httplib.HTTPS_PORT
285 287 # must be able to send big bundle as stream.
286 288 send = _gen_sendfile(keepalive.safesend)
287 289 getresponse = keepalive.wrapgetresponse(httplib.HTTPConnection)
288 290
289 291 def __init__(self, host, port=None, key_file=None, cert_file=None,
290 292 *args, **kwargs):
291 293 httplib.HTTPConnection.__init__(self, host, port, *args, **kwargs)
292 294 self.key_file = key_file
293 295 self.cert_file = cert_file
294 296
295 297 def connect(self):
296 298 self.sock = socket.create_connection((self.host, self.port))
297 299
298 300 host = self.host
299 301 if self.realhostport: # use CONNECT proxy
300 302 _generic_proxytunnel(self)
301 303 host = self.realhostport.rsplit(':', 1)[0]
302 304 self.sock = sslutil.wrapsocket(
303 305 self.sock, self.key_file, self.cert_file, ui=self.ui,
304 306 serverhostname=host)
305 307 sslutil.validatesocket(self.sock)
306 308
307 309 class httpshandler(keepalive.KeepAliveHandler, urlreq.httpshandler):
308 310 def __init__(self, ui):
309 311 keepalive.KeepAliveHandler.__init__(self)
310 312 urlreq.httpshandler.__init__(self)
311 313 self.ui = ui
312 314 self.pwmgr = passwordmgr(self.ui,
313 315 self.ui.httppasswordmgrdb)
314 316
315 317 def _start_transaction(self, h, req):
316 318 _generic_start_transaction(self, h, req)
317 319 return keepalive.KeepAliveHandler._start_transaction(self, h, req)
318 320
319 321 def https_open(self, req):
320 322 # req.get_full_url() does not contain credentials and we may
321 323 # need them to match the certificates.
322 324 url = req.get_full_url()
323 325 user, password = self.pwmgr.find_stored_password(url)
324 326 res = httpconnectionmod.readauthforuri(self.ui, url, user)
325 327 if res:
326 328 group, auth = res
327 329 self.auth = auth
328 330 self.ui.debug("using auth.%s.* for authentication\n" % group)
329 331 else:
330 332 self.auth = None
331 333 return self.do_open(self._makeconnection, req)
332 334
333 335 def _makeconnection(self, host, port=None, *args, **kwargs):
334 336 keyfile = None
335 337 certfile = None
336 338
337 339 if len(args) >= 1: # key_file
338 340 keyfile = args[0]
339 341 if len(args) >= 2: # cert_file
340 342 certfile = args[1]
341 343 args = args[2:]
342 344
343 345 # if the user has specified different key/cert files in
344 346 # hgrc, we prefer these
345 347 if self.auth and 'key' in self.auth and 'cert' in self.auth:
346 348 keyfile = self.auth['key']
347 349 certfile = self.auth['cert']
348 350
349 351 conn = httpsconnection(host, port, keyfile, certfile, *args,
350 352 **kwargs)
351 353 conn.ui = self.ui
352 354 return conn
353 355
354 356 class httpdigestauthhandler(urlreq.httpdigestauthhandler):
355 357 def __init__(self, *args, **kwargs):
356 358 urlreq.httpdigestauthhandler.__init__(self, *args, **kwargs)
357 359 self.retried_req = None
358 360
359 361 def reset_retry_count(self):
360 362 # Python 2.6.5 will call this on 401 or 407 errors and thus loop
361 363 # forever. We disable reset_retry_count completely and reset in
362 364 # http_error_auth_reqed instead.
363 365 pass
364 366
365 367 def http_error_auth_reqed(self, auth_header, host, req, headers):
366 368 # Reset the retry counter once for each request.
367 369 if req is not self.retried_req:
368 370 self.retried_req = req
369 371 self.retried = 0
370 372 return urlreq.httpdigestauthhandler.http_error_auth_reqed(
371 373 self, auth_header, host, req, headers)
372 374
373 375 class httpbasicauthhandler(urlreq.httpbasicauthhandler):
374 376 def __init__(self, *args, **kwargs):
375 377 self.auth = None
376 378 urlreq.httpbasicauthhandler.__init__(self, *args, **kwargs)
377 379 self.retried_req = None
378 380
379 381 def http_request(self, request):
380 382 if self.auth:
381 383 request.add_unredirected_header(self.auth_header, self.auth)
382 384
383 385 return request
384 386
385 387 def https_request(self, request):
386 388 if self.auth:
387 389 request.add_unredirected_header(self.auth_header, self.auth)
388 390
389 391 return request
390 392
391 393 def reset_retry_count(self):
392 394 # Python 2.6.5 will call this on 401 or 407 errors and thus loop
393 395 # forever. We disable reset_retry_count completely and reset in
394 396 # http_error_auth_reqed instead.
395 397 pass
396 398
397 399 def http_error_auth_reqed(self, auth_header, host, req, headers):
398 400 # Reset the retry counter once for each request.
399 401 if req is not self.retried_req:
400 402 self.retried_req = req
401 403 self.retried = 0
402 404 return urlreq.httpbasicauthhandler.http_error_auth_reqed(
403 405 self, auth_header, host, req, headers)
404 406
405 407 def retry_http_basic_auth(self, host, req, realm):
406 408 user, pw = self.passwd.find_user_password(realm, req.get_full_url())
407 409 if pw is not None:
408 410 raw = "%s:%s" % (user, pw)
409 411 auth = 'Basic %s' % base64.b64encode(raw).strip()
410 412 if req.get_header(self.auth_header, None) == auth:
411 413 return None
412 414 self.auth = auth
413 415 req.add_unredirected_header(self.auth_header, auth)
414 416 return self.parent.open(req)
415 417 else:
416 418 return None
417 419
418 420 handlerfuncs = []
419 421
420 422 def opener(ui, authinfo=None):
421 423 '''
422 424 construct an opener suitable for urllib2
423 425 authinfo will be added to the password manager
424 426 '''
425 427 # experimental config: ui.usehttp2
426 428 if ui.configbool('ui', 'usehttp2', False):
427 429 handlers = [
428 430 httpconnectionmod.http2handler(
429 431 ui,
430 432 passwordmgr(ui, ui.httppasswordmgrdb))
431 433 ]
432 434 else:
433 435 handlers = [httphandler()]
434 436 if has_https:
435 437 handlers.append(httpshandler(ui))
436 438
437 439 handlers.append(proxyhandler(ui))
438 440
439 441 passmgr = passwordmgr(ui, ui.httppasswordmgrdb)
440 442 if authinfo is not None:
441 443 realm, uris, user, passwd = authinfo
442 444 saveduser, savedpass = passmgr.find_stored_password(uris[0])
443 445 if user != saveduser or passwd:
444 446 passmgr.add_password(realm, uris, user, passwd)
445 447 ui.debug('http auth: user %s, password %s\n' %
446 448 (user, passwd and '*' * len(passwd) or 'not set'))
447 449
448 450 handlers.extend((httpbasicauthhandler(passmgr),
449 451 httpdigestauthhandler(passmgr)))
450 452 handlers.extend([h(ui, passmgr) for h in handlerfuncs])
451 453 opener = urlreq.buildopener(*handlers)
452 454
453 455 # The user agent should *NOT* be used by servers for e.g.
454 456 # protocol detection or feature negotiation: there are other
455 457 # facilities for that.
456 458 #
457 459 # "mercurial/proto-1.0" was the original user agent string and
458 460 # exists for backwards compatibility reasons.
459 461 #
460 462 # The "(Mercurial %s)" string contains the distribution
461 463 # name and version. Other client implementations should choose their
462 464 # own distribution name. Since servers should not be using the user
463 465 # agent string for anything, clients should be able to define whatever
464 466 # user agent they deem appropriate.
465 467 agent = 'mercurial/proto-1.0 (Mercurial %s)' % util.version()
466 468 opener.addheaders = [('User-agent', agent)]
467 469 opener.addheaders.append(('Accept', 'application/mercurial-0.1'))
468 470 return opener
469 471
470 472 def open(ui, url_, data=None):
471 473 u = util.url(url_)
472 474 if u.scheme:
473 475 u.scheme = u.scheme.lower()
474 476 url_, authinfo = u.authinfo()
475 477 else:
476 478 path = util.normpath(os.path.abspath(url_))
477 479 url_ = 'file://' + urlreq.pathname2url(path)
478 480 authinfo = None
479 481 return opener(ui, authinfo).open(url_, data)
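
One subtlety in the url.py hunk: the `no_proxy` value is parsed into a normalized host list, and the patch keeps that parsing intact around the wrapper swap. A standalone restatement with a quick check (illustrative, str-based for brevity where the real code stays in bytes):

```python
def parse_no_proxy(value):
    # Comma-separated hosts: strip whitespace, lowercase, drop empties,
    # mirroring proxyhandler.__init__ above.
    return [p.strip().lower() for p in value.split(',') if p.strip()]

assert parse_no_proxy('Localhost, example.COM ,,') == ['localhost', 'example.com']
```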