clfilter: rename `unfilteredmeth` to `unfilteredmethod`...
Pierre-Yves David
r18016:2a393df0 default
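
The diff below renames every use of the `unfilteredmeth` decorator to `unfilteredmethod`: the first hunk updates the caller in the largefiles reposetup module, and the second renames the definition and the remaining users in mercurial/localrepo.py. As a rough, self-contained sketch (plain Python with illustrative class names, not the actual Mercurial code), the decorator simply re-dispatches a repository method onto the unfiltered view of the repository:

    # Minimal sketch of the renamed decorator (illustrative names only; the
    # real definition appears in the localrepo.py hunk further down).
    def unfilteredmethod(orig):
        """Decorate a repo method so it always runs on the unfiltered repository."""
        def wrapper(repo, *args, **kwargs):
            return orig(repo.unfiltered(), *args, **kwargs)
        return wrapper

    class examplerepo(object):
        def unfiltered(self):
            # a filtered repo proxy would return the underlying unfiltered repo here
            return self

        @unfilteredmethod          # previously spelled @unfilteredmeth
        def updatebranchcache(self):
            # cache maintenance that must see all changesets, filtered or not
            return 'branch cache updated'

    print(examplerepo().updatebranchcache())

Only the decorator's name changes in this commit; the wrapper logic and the behaviour of every call site stay the same.
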
@@ -1,479 +1,479 @@
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''setup for largefiles repositories: reposetup'''
10 10 import copy
11 11 import types
12 12 import os
13 13
14 14 from mercurial import context, error, manifest, match as match_, util
15 15 from mercurial import node as node_
16 16 from mercurial.i18n import _
17 17 from mercurial import localrepo
18 18
19 19 import lfcommands
20 20 import proto
21 21 import lfutil
22 22
23 23 def reposetup(ui, repo):
24 24 # wire repositories should be given new wireproto functions but not the
25 25 # other largefiles modifications
26 26 if not repo.local():
27 27 return proto.wirereposetup(ui, repo)
28 28
29 29 for name in ('status', 'commitctx', 'commit', 'push'):
30 30 method = getattr(repo, name)
31 31 if (isinstance(method, types.FunctionType) and
32 32 method.func_name == 'wrap'):
33 33 ui.warn(_('largefiles: repo method %r appears to have already been'
34 34 ' wrapped by another extension: '
35 35 'largefiles may behave incorrectly\n')
36 36 % name)
37 37
38 38 class lfilesrepo(repo.__class__):
39 39 lfstatus = False
40 40 def status_nolfiles(self, *args, **kwargs):
41 41 return super(lfilesrepo, self).status(*args, **kwargs)
42 42
43 43 # When lfstatus is set, return a context that gives the names
44 44 # of largefiles instead of their corresponding standins and
45 45 # identifies the largefiles as always binary, regardless of
46 46 # their actual contents.
47 47 def __getitem__(self, changeid):
48 48 ctx = super(lfilesrepo, self).__getitem__(changeid)
49 49 if self.lfstatus:
50 50 class lfilesmanifestdict(manifest.manifestdict):
51 51 def __contains__(self, filename):
52 52 if super(lfilesmanifestdict,
53 53 self).__contains__(filename):
54 54 return True
55 55 return super(lfilesmanifestdict,
56 56 self).__contains__(lfutil.standin(filename))
57 57 class lfilesctx(ctx.__class__):
58 58 def files(self):
59 59 filenames = super(lfilesctx, self).files()
60 60 return [lfutil.splitstandin(f) or f for f in filenames]
61 61 def manifest(self):
62 62 man1 = super(lfilesctx, self).manifest()
63 63 man1.__class__ = lfilesmanifestdict
64 64 return man1
65 65 def filectx(self, path, fileid=None, filelog=None):
66 66 try:
67 67 if filelog is not None:
68 68 result = super(lfilesctx, self).filectx(
69 69 path, fileid, filelog)
70 70 else:
71 71 result = super(lfilesctx, self).filectx(
72 72 path, fileid)
73 73 except error.LookupError:
74 74 # Adding a null character will cause Mercurial to
75 75 # identify this as a binary file.
76 76 if filelog is not None:
77 77 result = super(lfilesctx, self).filectx(
78 78 lfutil.standin(path), fileid, filelog)
79 79 else:
80 80 result = super(lfilesctx, self).filectx(
81 81 lfutil.standin(path), fileid)
82 82 olddata = result.data
83 83 result.data = lambda: olddata() + '\0'
84 84 return result
85 85 ctx.__class__ = lfilesctx
86 86 return ctx
87 87
88 88 # Figure out the status of big files and insert them into the
89 89 # appropriate list in the result. Also removes standin files
90 90 # from the listing. Revert to the original status if
91 91 # self.lfstatus is False.
92 92 # XXX large file status is buggy when used on repo proxy.
93 93 # XXX this needs to be investigated.
94 @localrepo.unfilteredmeth
94 @localrepo.unfilteredmethod
95 95 def status(self, node1='.', node2=None, match=None, ignored=False,
96 96 clean=False, unknown=False, listsubrepos=False):
97 97 listignored, listclean, listunknown = ignored, clean, unknown
98 98 if not self.lfstatus:
99 99 return super(lfilesrepo, self).status(node1, node2, match,
100 100 listignored, listclean, listunknown, listsubrepos)
101 101 else:
102 102 # some calls in this function rely on the old version of status
103 103 self.lfstatus = False
104 104 if isinstance(node1, context.changectx):
105 105 ctx1 = node1
106 106 else:
107 107 ctx1 = self[node1]
108 108 if isinstance(node2, context.changectx):
109 109 ctx2 = node2
110 110 else:
111 111 ctx2 = self[node2]
112 112 working = ctx2.rev() is None
113 113 parentworking = working and ctx1 == self['.']
114 114
115 115 def inctx(file, ctx):
116 116 try:
117 117 if ctx.rev() is None:
118 118 return file in ctx.manifest()
119 119 ctx[file]
120 120 return True
121 121 except KeyError:
122 122 return False
123 123
124 124 if match is None:
125 125 match = match_.always(self.root, self.getcwd())
126 126
127 127 # First check if there were files specified on the
128 128 # command line. If there were, and none of them were
129 129 # largefiles, we should just bail here and let super
130 130 # handle it -- thus gaining a big performance boost.
131 131 lfdirstate = lfutil.openlfdirstate(ui, self)
132 132 if match.files() and not match.anypats():
133 133 for f in lfdirstate:
134 134 if match(f):
135 135 break
136 136 else:
137 137 return super(lfilesrepo, self).status(node1, node2,
138 138 match, listignored, listclean,
139 139 listunknown, listsubrepos)
140 140
141 141 # Create a copy of match that matches standins instead
142 142 # of largefiles.
143 143 def tostandins(files):
144 144 if not working:
145 145 return files
146 146 newfiles = []
147 147 dirstate = self.dirstate
148 148 for f in files:
149 149 sf = lfutil.standin(f)
150 150 if sf in dirstate:
151 151 newfiles.append(sf)
152 152 elif sf in dirstate.dirs():
153 153 # Directory entries could be regular or
154 154 # standin, check both
155 155 newfiles.extend((f, sf))
156 156 else:
157 157 newfiles.append(f)
158 158 return newfiles
159 159
160 160 # Create a function that we can use to override what is
161 161 # normally the ignore matcher. We've already checked
162 162 # for ignored files on the first dirstate walk, and
163 163 # unnecessarily re-checking here causes a huge performance
164 164 # hit because lfdirstate only knows about largefiles
165 165 def _ignoreoverride(self):
166 166 return False
167 167
168 168 m = copy.copy(match)
169 169 m._files = tostandins(m._files)
170 170
171 171 # Get ignored files here even if we weren't asked for them; we
172 172 # must use the result here for filtering later
173 173 result = super(lfilesrepo, self).status(node1, node2, m,
174 174 True, clean, unknown, listsubrepos)
175 175 if working:
176 176 try:
177 177 # Any non-largefiles that were explicitly listed must be
178 178 # taken out or lfdirstate.status will report an error.
179 179 # The status of these files was already computed using
180 180 # super's status.
181 181 # Override lfdirstate's ignore matcher to not do
182 182 # anything
183 183 origignore = lfdirstate._ignore
184 184 lfdirstate._ignore = _ignoreoverride
185 185
186 186 def sfindirstate(f):
187 187 sf = lfutil.standin(f)
188 188 dirstate = self.dirstate
189 189 return sf in dirstate or sf in dirstate.dirs()
190 190 match._files = [f for f in match._files
191 191 if sfindirstate(f)]
192 192 # Don't waste time getting the ignored and unknown
193 193 # files again; we already have them
194 194 s = lfdirstate.status(match, [], False,
195 195 listclean, False)
196 196 (unsure, modified, added, removed, missing, unknown,
197 197 ignored, clean) = s
198 198 # Replace the list of ignored and unknown files with
199 199 # the previously calculated lists, and strip out the
200 200 # largefiles
201 201 lfiles = set(lfdirstate._map)
202 202 ignored = set(result[5]).difference(lfiles)
203 203 unknown = set(result[4]).difference(lfiles)
204 204 if parentworking:
205 205 for lfile in unsure:
206 206 standin = lfutil.standin(lfile)
207 207 if standin not in ctx1:
208 208 # from second parent
209 209 modified.append(lfile)
210 210 elif ctx1[standin].data().strip() \
211 211 != lfutil.hashfile(self.wjoin(lfile)):
212 212 modified.append(lfile)
213 213 else:
214 214 clean.append(lfile)
215 215 lfdirstate.normal(lfile)
216 216 else:
217 217 tocheck = unsure + modified + added + clean
218 218 modified, added, clean = [], [], []
219 219
220 220 for lfile in tocheck:
221 221 standin = lfutil.standin(lfile)
222 222 if inctx(standin, ctx1):
223 223 if ctx1[standin].data().strip() != \
224 224 lfutil.hashfile(self.wjoin(lfile)):
225 225 modified.append(lfile)
226 226 else:
227 227 clean.append(lfile)
228 228 else:
229 229 added.append(lfile)
230 230 finally:
231 231 # Replace the original ignore function
232 232 lfdirstate._ignore = origignore
233 233
234 234 for standin in ctx1.manifest():
235 235 if not lfutil.isstandin(standin):
236 236 continue
237 237 lfile = lfutil.splitstandin(standin)
238 238 if not match(lfile):
239 239 continue
240 240 if lfile not in lfdirstate:
241 241 removed.append(lfile)
242 242
243 243 # Filter result lists
244 244 result = list(result)
245 245
246 246 # Largefiles are not really removed when they're
247 247 # still in the normal dirstate. Likewise, normal
248 248 # files are not really removed if it's still in
249 249 # lfdirstate. This happens in merges where files
250 250 # change type.
251 251 removed = [f for f in removed if f not in self.dirstate]
252 252 result[2] = [f for f in result[2] if f not in lfdirstate]
253 253
254 254 # Unknown files
255 255 unknown = set(unknown).difference(ignored)
256 256 result[4] = [f for f in unknown
257 257 if (self.dirstate[f] == '?' and
258 258 not lfutil.isstandin(f))]
259 259 # Ignored files were calculated earlier by the dirstate,
260 260 # and we already stripped out the largefiles from the list
261 261 result[5] = ignored
262 262 # combine normal files and largefiles
263 263 normals = [[fn for fn in filelist
264 264 if not lfutil.isstandin(fn)]
265 265 for filelist in result]
266 266 lfiles = (modified, added, removed, missing, [], [], clean)
267 267 result = [sorted(list1 + list2)
268 268 for (list1, list2) in zip(normals, lfiles)]
269 269 else:
270 270 def toname(f):
271 271 if lfutil.isstandin(f):
272 272 return lfutil.splitstandin(f)
273 273 return f
274 274 result = [[toname(f) for f in items] for items in result]
275 275
276 276 if not listunknown:
277 277 result[4] = []
278 278 if not listignored:
279 279 result[5] = []
280 280 if not listclean:
281 281 result[6] = []
282 282 self.lfstatus = True
283 283 return result
284 284
285 285 # As part of committing, copy all of the largefiles into the
286 286 # cache.
287 287 def commitctx(self, *args, **kwargs):
288 288 node = super(lfilesrepo, self).commitctx(*args, **kwargs)
289 289 lfutil.copyalltostore(self, node)
290 290 return node
291 291
292 292 # Before commit, largefile standins have not had their
293 293 # contents updated to reflect the hash of their largefile.
294 294 # Do that here.
295 295 def commit(self, text="", user=None, date=None, match=None,
296 296 force=False, editor=False, extra={}):
297 297 orig = super(lfilesrepo, self).commit
298 298
299 299 wlock = self.wlock()
300 300 try:
301 301 # Case 0: Rebase or Transplant
302 302 # We have to take the time to pull down the new largefiles now.
303 303 # Otherwise, any largefiles that were modified in the
304 304 # destination changesets get overwritten, either by the rebase
305 305 # or in the first commit after the rebase or transplant.
306 306 # updatelfiles will update the dirstate to mark any pulled
307 307 # largefiles as modified
308 308 if getattr(self, "_isrebasing", False) or \
309 309 getattr(self, "_istransplanting", False):
310 310 lfcommands.updatelfiles(self.ui, self, filelist=None,
311 311 printmessage=False)
312 312 result = orig(text=text, user=user, date=date, match=match,
313 313 force=force, editor=editor, extra=extra)
314 314 return result
315 315 # Case 1: user calls commit with no specific files or
316 316 # include/exclude patterns: refresh and commit all files that
317 317 # are "dirty".
318 318 if ((match is None) or
319 319 (not match.anypats() and not match.files())):
320 320 # Spend a bit of time here to get a list of files we know
321 321 # are modified so we can compare only against those.
322 322 # It can cost a lot of time (several seconds)
323 323 # otherwise to update all standins if the largefiles are
324 324 # large.
325 325 lfdirstate = lfutil.openlfdirstate(ui, self)
326 326 dirtymatch = match_.always(self.root, self.getcwd())
327 327 s = lfdirstate.status(dirtymatch, [], False, False, False)
328 328 modifiedfiles = []
329 329 for i in s:
330 330 modifiedfiles.extend(i)
331 331 lfiles = lfutil.listlfiles(self)
332 332 # this only loops through largefiles that exist (not
333 333 # removed/renamed)
334 334 for lfile in lfiles:
335 335 if lfile in modifiedfiles:
336 336 if os.path.exists(
337 337 self.wjoin(lfutil.standin(lfile))):
338 338 # this handles the case where a rebase is being
339 339 # performed and the working copy is not updated
340 340 # yet.
341 341 if os.path.exists(self.wjoin(lfile)):
342 342 lfutil.updatestandin(self,
343 343 lfutil.standin(lfile))
344 344 lfdirstate.normal(lfile)
345 345
346 346 result = orig(text=text, user=user, date=date, match=match,
347 347 force=force, editor=editor, extra=extra)
348 348
349 349 if result is not None:
350 350 for lfile in lfdirstate:
351 351 if lfile in modifiedfiles:
352 352 if (not os.path.exists(self.wjoin(
353 353 lfutil.standin(lfile)))) or \
354 354 (not os.path.exists(self.wjoin(lfile))):
355 355 lfdirstate.drop(lfile)
356 356
357 357 # This needs to be after commit; otherwise precommit hooks
358 358 # get the wrong status
359 359 lfdirstate.write()
360 360 return result
361 361
362 362 for f in match.files():
363 363 if lfutil.isstandin(f):
364 364 raise util.Abort(
365 365 _('file "%s" is a largefile standin') % f,
366 366 hint=('commit the largefile itself instead'))
367 367
368 368 # Case 2: user calls commit with specified patterns: refresh
369 369 # any matching big files.
370 370 smatcher = lfutil.composestandinmatcher(self, match)
371 371 standins = lfutil.dirstatewalk(self.dirstate, smatcher)
372 372
373 373 # No matching big files: get out of the way and pass control to
374 374 # the usual commit() method.
375 375 if not standins:
376 376 return orig(text=text, user=user, date=date, match=match,
377 377 force=force, editor=editor, extra=extra)
378 378
379 379 # Refresh all matching big files. It's possible that the
380 380 # commit will end up failing, in which case the big files will
381 381 # stay refreshed. No harm done: the user modified them and
382 382 # asked to commit them, so sooner or later we're going to
383 383 # refresh the standins. Might as well leave them refreshed.
384 384 lfdirstate = lfutil.openlfdirstate(ui, self)
385 385 for standin in standins:
386 386 lfile = lfutil.splitstandin(standin)
387 387 if lfdirstate[lfile] <> 'r':
388 388 lfutil.updatestandin(self, standin)
389 389 lfdirstate.normal(lfile)
390 390 else:
391 391 lfdirstate.drop(lfile)
392 392
393 393 # Cook up a new matcher that only matches regular files or
394 394 # standins corresponding to the big files requested by the
395 395 # user. Have to modify _files to prevent commit() from
396 396 # complaining "not tracked" for big files.
397 397 lfiles = lfutil.listlfiles(self)
398 398 match = copy.copy(match)
399 399 origmatchfn = match.matchfn
400 400
401 401 # Check both the list of largefiles and the list of
402 402 # standins because if a largefile was removed, it
403 403 # won't be in the list of largefiles at this point
404 404 match._files += sorted(standins)
405 405
406 406 actualfiles = []
407 407 for f in match._files:
408 408 fstandin = lfutil.standin(f)
409 409
410 410 # ignore known largefiles and standins
411 411 if f in lfiles or fstandin in standins:
412 412 continue
413 413
414 414 # append directory separator to avoid collisions
415 415 if not fstandin.endswith(os.sep):
416 416 fstandin += os.sep
417 417
418 418 actualfiles.append(f)
419 419 match._files = actualfiles
420 420
421 421 def matchfn(f):
422 422 if origmatchfn(f):
423 423 return f not in lfiles
424 424 else:
425 425 return f in standins
426 426
427 427 match.matchfn = matchfn
428 428 result = orig(text=text, user=user, date=date, match=match,
429 429 force=force, editor=editor, extra=extra)
430 430 # This needs to be after commit; otherwise precommit hooks
431 431 # get the wrong status
432 432 lfdirstate.write()
433 433 return result
434 434 finally:
435 435 wlock.release()
436 436
437 437 def push(self, remote, force=False, revs=None, newbranch=False):
438 438 o = lfutil.findoutgoing(self, remote, force)
439 439 if o:
440 440 toupload = set()
441 441 o = self.changelog.nodesbetween(o, revs)[0]
442 442 for n in o:
443 443 parents = [p for p in self.changelog.parents(n)
444 444 if p != node_.nullid]
445 445 ctx = self[n]
446 446 files = set(ctx.files())
447 447 if len(parents) == 2:
448 448 mc = ctx.manifest()
449 449 mp1 = ctx.parents()[0].manifest()
450 450 mp2 = ctx.parents()[1].manifest()
451 451 for f in mp1:
452 452 if f not in mc:
453 453 files.add(f)
454 454 for f in mp2:
455 455 if f not in mc:
456 456 files.add(f)
457 457 for f in mc:
458 458 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
459 459 None):
460 460 files.add(f)
461 461
462 462 toupload = toupload.union(
463 463 set([ctx[f].data().strip()
464 464 for f in files
465 465 if lfutil.isstandin(f) and f in ctx]))
466 466 lfcommands.uploadlfiles(ui, self, remote, toupload)
467 467 return super(lfilesrepo, self).push(remote, force, revs,
468 468 newbranch)
469 469
470 470 repo.__class__ = lfilesrepo
471 471
472 472 def checkrequireslfiles(ui, repo, **kwargs):
473 473 if 'largefiles' not in repo.requirements and util.any(
474 474 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
475 475 repo.requirements.add('largefiles')
476 476 repo._writerequirements()
477 477
478 478 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
479 479 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
@@ -1,2680 +1,2680 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class repofilecache(filecache):
22 22 """All filecache usage on repo are done for logic that should be unfiltered
23 23 """
24 24
25 25 def __get__(self, repo, type=None):
26 26 return super(repofilecache, self).__get__(repo.unfiltered(), type)
27 27 def __set__(self, repo, value):
28 28 return super(repofilecache, self).__set__(repo.unfiltered(), value)
29 29 def __delete__(self, repo):
30 30 return super(repofilecache, self).__delete__(repo.unfiltered())
31 31
32 32 class storecache(repofilecache):
33 33 """filecache for files in the store"""
34 34 def join(self, obj, fname):
35 35 return obj.sjoin(fname)
36 36
37 37 class unfilteredpropertycache(propertycache):
38 38 """propertycache that apply to unfiltered repo only"""
39 39
40 40 def __get__(self, repo, type=None):
41 41 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
42 42
43 43 class filteredpropertycache(propertycache):
44 44 """propertycache that must take filtering in account"""
45 45
46 46 def cachevalue(self, obj, value):
47 47 object.__setattr__(obj, self.name, value)
48 48
49 49
50 50 def hasunfilteredcache(repo, name):
51 51 """check if an repo and a unfilteredproperty cached value for <name>"""
52 52 return name in vars(repo.unfiltered())
53 53
54 def unfilteredmeth(orig):
54 def unfilteredmethod(orig):
55 55 """decorate method that always need to be run on unfiltered version"""
56 56 def wrapper(repo, *args, **kwargs):
57 57 return orig(repo.unfiltered(), *args, **kwargs)
58 58 return wrapper
59 59
60 60 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
61 61 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
62 62
63 63 class localpeer(peer.peerrepository):
64 64 '''peer for a local repo; reflects only the most recent API'''
65 65
66 66 def __init__(self, repo, caps=MODERNCAPS):
67 67 peer.peerrepository.__init__(self)
68 68 self._repo = repo
69 69 self.ui = repo.ui
70 70 self._caps = repo._restrictcapabilities(caps)
71 71 self.requirements = repo.requirements
72 72 self.supportedformats = repo.supportedformats
73 73
74 74 def close(self):
75 75 self._repo.close()
76 76
77 77 def _capabilities(self):
78 78 return self._caps
79 79
80 80 def local(self):
81 81 return self._repo
82 82
83 83 def canpush(self):
84 84 return True
85 85
86 86 def url(self):
87 87 return self._repo.url()
88 88
89 89 def lookup(self, key):
90 90 return self._repo.lookup(key)
91 91
92 92 def branchmap(self):
93 93 return discovery.visiblebranchmap(self._repo)
94 94
95 95 def heads(self):
96 96 return discovery.visibleheads(self._repo)
97 97
98 98 def known(self, nodes):
99 99 return self._repo.known(nodes)
100 100
101 101 def getbundle(self, source, heads=None, common=None):
102 102 return self._repo.getbundle(source, heads=heads, common=common)
103 103
104 104 # TODO We might want to move the next two calls into legacypeer and add
105 105 # unbundle instead.
106 106
107 107 def lock(self):
108 108 return self._repo.lock()
109 109
110 110 def addchangegroup(self, cg, source, url):
111 111 return self._repo.addchangegroup(cg, source, url)
112 112
113 113 def pushkey(self, namespace, key, old, new):
114 114 return self._repo.pushkey(namespace, key, old, new)
115 115
116 116 def listkeys(self, namespace):
117 117 return self._repo.listkeys(namespace)
118 118
119 119 def debugwireargs(self, one, two, three=None, four=None, five=None):
120 120 '''used to test argument passing over the wire'''
121 121 return "%s %s %s %s %s" % (one, two, three, four, five)
122 122
123 123 class locallegacypeer(localpeer):
124 124 '''peer extension which implements legacy methods too; used for tests with
125 125 restricted capabilities'''
126 126
127 127 def __init__(self, repo):
128 128 localpeer.__init__(self, repo, caps=LEGACYCAPS)
129 129
130 130 def branches(self, nodes):
131 131 return self._repo.branches(nodes)
132 132
133 133 def between(self, pairs):
134 134 return self._repo.between(pairs)
135 135
136 136 def changegroup(self, basenodes, source):
137 137 return self._repo.changegroup(basenodes, source)
138 138
139 139 def changegroupsubset(self, bases, heads, source):
140 140 return self._repo.changegroupsubset(bases, heads, source)
141 141
142 142 class localrepository(object):
143 143
144 144 supportedformats = set(('revlogv1', 'generaldelta'))
145 145 supported = supportedformats | set(('store', 'fncache', 'shared',
146 146 'dotencode'))
147 147 openerreqs = set(('revlogv1', 'generaldelta'))
148 148 requirements = ['revlogv1']
149 149
150 150 def _baserequirements(self, create):
151 151 return self.requirements[:]
152 152
153 153 def __init__(self, baseui, path=None, create=False):
154 154 self.wvfs = scmutil.vfs(path, expand=True)
155 155 self.wopener = self.wvfs
156 156 self.root = self.wvfs.base
157 157 self.path = self.wvfs.join(".hg")
158 158 self.origroot = path
159 159 self.auditor = scmutil.pathauditor(self.root, self._checknested)
160 160 self.vfs = scmutil.vfs(self.path)
161 161 self.opener = self.vfs
162 162 self.baseui = baseui
163 163 self.ui = baseui.copy()
164 164 # A list of callback to shape the phase if no data were found.
165 165 # Callback are in the form: func(repo, roots) --> processed root.
166 166 # This list it to be filled by extension during repo setup
167 167 self._phasedefaults = []
168 168 try:
169 169 self.ui.readconfig(self.join("hgrc"), self.root)
170 170 extensions.loadall(self.ui)
171 171 except IOError:
172 172 pass
173 173
174 174 if not self.vfs.isdir():
175 175 if create:
176 176 if not self.wvfs.exists():
177 177 self.wvfs.makedirs()
178 178 self.vfs.makedir(notindexed=True)
179 179 requirements = self._baserequirements(create)
180 180 if self.ui.configbool('format', 'usestore', True):
181 181 self.vfs.mkdir("store")
182 182 requirements.append("store")
183 183 if self.ui.configbool('format', 'usefncache', True):
184 184 requirements.append("fncache")
185 185 if self.ui.configbool('format', 'dotencode', True):
186 186 requirements.append('dotencode')
187 187 # create an invalid changelog
188 188 self.vfs.append(
189 189 "00changelog.i",
190 190 '\0\0\0\2' # represents revlogv2
191 191 ' dummy changelog to prevent using the old repo layout'
192 192 )
193 193 if self.ui.configbool('format', 'generaldelta', False):
194 194 requirements.append("generaldelta")
195 195 requirements = set(requirements)
196 196 else:
197 197 raise error.RepoError(_("repository %s not found") % path)
198 198 elif create:
199 199 raise error.RepoError(_("repository %s already exists") % path)
200 200 else:
201 201 try:
202 202 requirements = scmutil.readrequires(self.vfs, self.supported)
203 203 except IOError, inst:
204 204 if inst.errno != errno.ENOENT:
205 205 raise
206 206 requirements = set()
207 207
208 208 self.sharedpath = self.path
209 209 try:
210 210 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
211 211 if not os.path.exists(s):
212 212 raise error.RepoError(
213 213 _('.hg/sharedpath points to nonexistent directory %s') % s)
214 214 self.sharedpath = s
215 215 except IOError, inst:
216 216 if inst.errno != errno.ENOENT:
217 217 raise
218 218
219 219 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
220 220 self.spath = self.store.path
221 221 self.svfs = self.store.vfs
222 222 self.sopener = self.svfs
223 223 self.sjoin = self.store.join
224 224 self.vfs.createmode = self.store.createmode
225 225 self._applyrequirements(requirements)
226 226 if create:
227 227 self._writerequirements()
228 228
229 229
230 230 self._branchcache = None
231 231 self._branchcachetip = None
232 232 self.filterpats = {}
233 233 self._datafilters = {}
234 234 self._transref = self._lockref = self._wlockref = None
235 235
236 236 # A cache for various files under .hg/ that tracks file changes,
237 237 # (used by the filecache decorator)
238 238 #
239 239 # Maps a property name to its util.filecacheentry
240 240 self._filecache = {}
241 241
242 242 def close(self):
243 243 pass
244 244
245 245 def _restrictcapabilities(self, caps):
246 246 return caps
247 247
248 248 def _applyrequirements(self, requirements):
249 249 self.requirements = requirements
250 250 self.sopener.options = dict((r, 1) for r in requirements
251 251 if r in self.openerreqs)
252 252
253 253 def _writerequirements(self):
254 254 reqfile = self.opener("requires", "w")
255 255 for r in self.requirements:
256 256 reqfile.write("%s\n" % r)
257 257 reqfile.close()
258 258
259 259 def _checknested(self, path):
260 260 """Determine if path is a legal nested repository."""
261 261 if not path.startswith(self.root):
262 262 return False
263 263 subpath = path[len(self.root) + 1:]
264 264 normsubpath = util.pconvert(subpath)
265 265
266 266 # XXX: Checking against the current working copy is wrong in
267 267 # the sense that it can reject things like
268 268 #
269 269 # $ hg cat -r 10 sub/x.txt
270 270 #
271 271 # if sub/ is no longer a subrepository in the working copy
272 272 # parent revision.
273 273 #
274 274 # However, it can of course also allow things that would have
275 275 # been rejected before, such as the above cat command if sub/
276 276 # is a subrepository now, but was a normal directory before.
277 277 # The old path auditor would have rejected by mistake since it
278 278 # panics when it sees sub/.hg/.
279 279 #
280 280 # All in all, checking against the working copy seems sensible
281 281 # since we want to prevent access to nested repositories on
282 282 # the filesystem *now*.
283 283 ctx = self[None]
284 284 parts = util.splitpath(subpath)
285 285 while parts:
286 286 prefix = '/'.join(parts)
287 287 if prefix in ctx.substate:
288 288 if prefix == normsubpath:
289 289 return True
290 290 else:
291 291 sub = ctx.sub(prefix)
292 292 return sub.checknested(subpath[len(prefix) + 1:])
293 293 else:
294 294 parts.pop()
295 295 return False
296 296
297 297 def peer(self):
298 298 return localpeer(self) # not cached to avoid reference cycle
299 299
300 300 def unfiltered(self):
301 301 """Return unfiltered version of the repository
302 302
303 303 Intended to be overwritten by filtered repo."""
304 304 return self
305 305
306 306 @repofilecache('bookmarks')
307 307 def _bookmarks(self):
308 308 return bookmarks.bmstore(self)
309 309
310 310 @repofilecache('bookmarks.current')
311 311 def _bookmarkcurrent(self):
312 312 return bookmarks.readcurrent(self)
313 313
314 314 def bookmarkheads(self, bookmark):
315 315 name = bookmark.split('@', 1)[0]
316 316 heads = []
317 317 for mark, n in self._bookmarks.iteritems():
318 318 if mark.split('@', 1)[0] == name:
319 319 heads.append(n)
320 320 return heads
321 321
322 322 @storecache('phaseroots')
323 323 def _phasecache(self):
324 324 return phases.phasecache(self, self._phasedefaults)
325 325
326 326 @storecache('obsstore')
327 327 def obsstore(self):
328 328 store = obsolete.obsstore(self.sopener)
329 329 if store and not obsolete._enabled:
330 330 # message is rare enough to not be translated
331 331 msg = 'obsolete feature not enabled but %i markers found!\n'
332 332 self.ui.warn(msg % len(list(store)))
333 333 return store
334 334
335 335 @unfilteredpropertycache
336 336 def hiddenrevs(self):
337 337 """hiddenrevs: revs that should be hidden by command and tools
338 338
339 339 This set is carried on the repo to ease initialization and lazy
340 340 loading; it'll probably move back to changelog for efficiency and
341 341 consistency reasons.
342 342
343 343 Note that the hiddenrevs will needs invalidations when
344 344 - a new changesets is added (possible unstable above extinct)
345 345 - a new obsolete marker is added (possible new extinct changeset)
346 346
347 347 hidden changesets cannot have non-hidden descendants
348 348 """
349 349 hidden = set()
350 350 if self.obsstore:
351 351 ### hide extinct changeset that are not accessible by any mean
352 352 hiddenquery = 'extinct() - ::(. + bookmark())'
353 353 hidden.update(self.revs(hiddenquery))
354 354 return hidden
355 355
356 356 @storecache('00changelog.i')
357 357 def changelog(self):
358 358 c = changelog.changelog(self.sopener)
359 359 if 'HG_PENDING' in os.environ:
360 360 p = os.environ['HG_PENDING']
361 361 if p.startswith(self.root):
362 362 c.readpending('00changelog.i.a')
363 363 return c
364 364
365 365 @storecache('00manifest.i')
366 366 def manifest(self):
367 367 return manifest.manifest(self.sopener)
368 368
369 369 @repofilecache('dirstate')
370 370 def dirstate(self):
371 371 warned = [0]
372 372 def validate(node):
373 373 try:
374 374 self.changelog.rev(node)
375 375 return node
376 376 except error.LookupError:
377 377 if not warned[0]:
378 378 warned[0] = True
379 379 self.ui.warn(_("warning: ignoring unknown"
380 380 " working parent %s!\n") % short(node))
381 381 return nullid
382 382
383 383 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
384 384
385 385 def __getitem__(self, changeid):
386 386 if changeid is None:
387 387 return context.workingctx(self)
388 388 return context.changectx(self, changeid)
389 389
390 390 def __contains__(self, changeid):
391 391 try:
392 392 return bool(self.lookup(changeid))
393 393 except error.RepoLookupError:
394 394 return False
395 395
396 396 def __nonzero__(self):
397 397 return True
398 398
399 399 def __len__(self):
400 400 return len(self.changelog)
401 401
402 402 def __iter__(self):
403 403 return iter(self.changelog)
404 404
405 405 def revs(self, expr, *args):
406 406 '''Return a list of revisions matching the given revset'''
407 407 expr = revset.formatspec(expr, *args)
408 408 m = revset.match(None, expr)
409 409 return [r for r in m(self, list(self))]
410 410
411 411 def set(self, expr, *args):
412 412 '''
413 413 Yield a context for each matching revision, after doing arg
414 414 replacement via revset.formatspec
415 415 '''
416 416 for r in self.revs(expr, *args):
417 417 yield self[r]
418 418
419 419 def url(self):
420 420 return 'file:' + self.root
421 421
422 422 def hook(self, name, throw=False, **args):
423 423 return hook.hook(self.ui, self, name, throw, **args)
424 424
425 @unfilteredmeth
425 @unfilteredmethod
426 426 def _tag(self, names, node, message, local, user, date, extra={}):
427 427 if isinstance(names, str):
428 428 names = (names,)
429 429
430 430 branches = self.branchmap()
431 431 for name in names:
432 432 self.hook('pretag', throw=True, node=hex(node), tag=name,
433 433 local=local)
434 434 if name in branches:
435 435 self.ui.warn(_("warning: tag %s conflicts with existing"
436 436 " branch name\n") % name)
437 437
438 438 def writetags(fp, names, munge, prevtags):
439 439 fp.seek(0, 2)
440 440 if prevtags and prevtags[-1] != '\n':
441 441 fp.write('\n')
442 442 for name in names:
443 443 m = munge and munge(name) or name
444 444 if (self._tagscache.tagtypes and
445 445 name in self._tagscache.tagtypes):
446 446 old = self.tags().get(name, nullid)
447 447 fp.write('%s %s\n' % (hex(old), m))
448 448 fp.write('%s %s\n' % (hex(node), m))
449 449 fp.close()
450 450
451 451 prevtags = ''
452 452 if local:
453 453 try:
454 454 fp = self.opener('localtags', 'r+')
455 455 except IOError:
456 456 fp = self.opener('localtags', 'a')
457 457 else:
458 458 prevtags = fp.read()
459 459
460 460 # local tags are stored in the current charset
461 461 writetags(fp, names, None, prevtags)
462 462 for name in names:
463 463 self.hook('tag', node=hex(node), tag=name, local=local)
464 464 return
465 465
466 466 try:
467 467 fp = self.wfile('.hgtags', 'rb+')
468 468 except IOError, e:
469 469 if e.errno != errno.ENOENT:
470 470 raise
471 471 fp = self.wfile('.hgtags', 'ab')
472 472 else:
473 473 prevtags = fp.read()
474 474
475 475 # committed tags are stored in UTF-8
476 476 writetags(fp, names, encoding.fromlocal, prevtags)
477 477
478 478 fp.close()
479 479
480 480 self.invalidatecaches()
481 481
482 482 if '.hgtags' not in self.dirstate:
483 483 self[None].add(['.hgtags'])
484 484
485 485 m = matchmod.exact(self.root, '', ['.hgtags'])
486 486 tagnode = self.commit(message, user, date, extra=extra, match=m)
487 487
488 488 for name in names:
489 489 self.hook('tag', node=hex(node), tag=name, local=local)
490 490
491 491 return tagnode
492 492
493 493 def tag(self, names, node, message, local, user, date):
494 494 '''tag a revision with one or more symbolic names.
495 495
496 496 names is a list of strings or, when adding a single tag, names may be a
497 497 string.
498 498
499 499 if local is True, the tags are stored in a per-repository file.
500 500 otherwise, they are stored in the .hgtags file, and a new
501 501 changeset is committed with the change.
502 502
503 503 keyword arguments:
504 504
505 505 local: whether to store tags in non-version-controlled file
506 506 (default False)
507 507
508 508 message: commit message to use if committing
509 509
510 510 user: name of user to use if committing
511 511
512 512 date: date tuple to use if committing'''
513 513
514 514 if not local:
515 515 for x in self.status()[:5]:
516 516 if '.hgtags' in x:
517 517 raise util.Abort(_('working copy of .hgtags is changed '
518 518 '(please commit .hgtags manually)'))
519 519
520 520 self.tags() # instantiate the cache
521 521 self._tag(names, node, message, local, user, date)
522 522
523 523 @filteredpropertycache
524 524 def _tagscache(self):
525 525 '''Returns a tagscache object that contains various tags related
526 526 caches.'''
527 527
528 528 # This simplifies its cache management by having one decorated
529 529 # function (this one) and the rest simply fetch things from it.
530 530 class tagscache(object):
531 531 def __init__(self):
532 532 # These two define the set of tags for this repository. tags
533 533 # maps tag name to node; tagtypes maps tag name to 'global' or
534 534 # 'local'. (Global tags are defined by .hgtags across all
535 535 # heads, and local tags are defined in .hg/localtags.)
536 536 # They constitute the in-memory cache of tags.
537 537 self.tags = self.tagtypes = None
538 538
539 539 self.nodetagscache = self.tagslist = None
540 540
541 541 cache = tagscache()
542 542 cache.tags, cache.tagtypes = self._findtags()
543 543
544 544 return cache
545 545
546 546 def tags(self):
547 547 '''return a mapping of tag to node'''
548 548 t = {}
549 549 if self.changelog.filteredrevs:
550 550 tags, tt = self._findtags()
551 551 else:
552 552 tags = self._tagscache.tags
553 553 for k, v in tags.iteritems():
554 554 try:
555 555 # ignore tags to unknown nodes
556 556 self.changelog.rev(v)
557 557 t[k] = v
558 558 except (error.LookupError, ValueError):
559 559 pass
560 560 return t
561 561
562 562 def _findtags(self):
563 563 '''Do the hard work of finding tags. Return a pair of dicts
564 564 (tags, tagtypes) where tags maps tag name to node, and tagtypes
565 565 maps tag name to a string like \'global\' or \'local\'.
566 566 Subclasses or extensions are free to add their own tags, but
567 567 should be aware that the returned dicts will be retained for the
568 568 duration of the localrepo object.'''
569 569
570 570 # XXX what tagtype should subclasses/extensions use? Currently
571 571 # mq and bookmarks add tags, but do not set the tagtype at all.
572 572 # Should each extension invent its own tag type? Should there
573 573 # be one tagtype for all such "virtual" tags? Or is the status
574 574 # quo fine?
575 575
576 576 alltags = {} # map tag name to (node, hist)
577 577 tagtypes = {}
578 578
579 579 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
580 580 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
581 581
582 582 # Build the return dicts. Have to re-encode tag names because
583 583 # the tags module always uses UTF-8 (in order not to lose info
584 584 # writing to the cache), but the rest of Mercurial wants them in
585 585 # local encoding.
586 586 tags = {}
587 587 for (name, (node, hist)) in alltags.iteritems():
588 588 if node != nullid:
589 589 tags[encoding.tolocal(name)] = node
590 590 tags['tip'] = self.changelog.tip()
591 591 tagtypes = dict([(encoding.tolocal(name), value)
592 592 for (name, value) in tagtypes.iteritems()])
593 593 return (tags, tagtypes)
594 594
595 595 def tagtype(self, tagname):
596 596 '''
597 597 return the type of the given tag. result can be:
598 598
599 599 'local' : a local tag
600 600 'global' : a global tag
601 601 None : tag does not exist
602 602 '''
603 603
604 604 return self._tagscache.tagtypes.get(tagname)
605 605
606 606 def tagslist(self):
607 607 '''return a list of tags ordered by revision'''
608 608 if not self._tagscache.tagslist:
609 609 l = []
610 610 for t, n in self.tags().iteritems():
611 611 r = self.changelog.rev(n)
612 612 l.append((r, t, n))
613 613 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
614 614
615 615 return self._tagscache.tagslist
616 616
617 617 def nodetags(self, node):
618 618 '''return the tags associated with a node'''
619 619 if not self._tagscache.nodetagscache:
620 620 nodetagscache = {}
621 621 for t, n in self._tagscache.tags.iteritems():
622 622 nodetagscache.setdefault(n, []).append(t)
623 623 for tags in nodetagscache.itervalues():
624 624 tags.sort()
625 625 self._tagscache.nodetagscache = nodetagscache
626 626 return self._tagscache.nodetagscache.get(node, [])
627 627
628 628 def nodebookmarks(self, node):
629 629 marks = []
630 630 for bookmark, n in self._bookmarks.iteritems():
631 631 if n == node:
632 632 marks.append(bookmark)
633 633 return sorted(marks)
634 634
635 635 def _branchtags(self, partial, lrev):
636 636 # TODO: rename this function?
637 637 tiprev = len(self) - 1
638 638 if lrev != tiprev:
639 639 ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
640 640 self._updatebranchcache(partial, ctxgen)
641 641 self._writebranchcache(partial, self.changelog.tip(), tiprev)
642 642
643 643 return partial
644 644
645 @unfilteredmeth # Until we get a smarter cache management
645 @unfilteredmethod # Until we get a smarter cache management
646 646 def updatebranchcache(self):
647 647 tip = self.changelog.tip()
648 648 if self._branchcache is not None and self._branchcachetip == tip:
649 649 return
650 650
651 651 oldtip = self._branchcachetip
652 652 self._branchcachetip = tip
653 653 if oldtip is None or oldtip not in self.changelog.nodemap:
654 654 partial, last, lrev = self._readbranchcache()
655 655 else:
656 656 lrev = self.changelog.rev(oldtip)
657 657 partial = self._branchcache
658 658
659 659 self._branchtags(partial, lrev)
660 660 # this private cache holds all heads (not just the branch tips)
661 661 self._branchcache = partial
662 662
663 663 def branchmap(self):
664 664 '''returns a dictionary {branch: [branchheads]}'''
665 665 if self.changelog.filteredrevs:
666 666 # some changeset are excluded we can't use the cache
667 667 branchmap = {}
668 668 self._updatebranchcache(branchmap, (self[r] for r in self))
669 669 return branchmap
670 670 else:
671 671 self.updatebranchcache()
672 672 return self._branchcache
673 673
674 674
675 675 def _branchtip(self, heads):
676 676 '''return the tipmost branch head in heads'''
677 677 tip = heads[-1]
678 678 for h in reversed(heads):
679 679 if not self[h].closesbranch():
680 680 tip = h
681 681 break
682 682 return tip
683 683
684 684 def branchtip(self, branch):
685 685 '''return the tip node for a given branch'''
686 686 if branch not in self.branchmap():
687 687 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
688 688 return self._branchtip(self.branchmap()[branch])
689 689
690 690 def branchtags(self):
691 691 '''return a dict where branch names map to the tipmost head of
692 692 the branch, open heads come before closed'''
693 693 bt = {}
694 694 for bn, heads in self.branchmap().iteritems():
695 695 bt[bn] = self._branchtip(heads)
696 696 return bt
697 697
698 @unfilteredmeth # Until we get a smarter cache management
698 @unfilteredmethod # Until we get a smarter cache management
699 699 def _readbranchcache(self):
700 700 partial = {}
701 701 try:
702 702 f = self.opener("cache/branchheads")
703 703 lines = f.read().split('\n')
704 704 f.close()
705 705 except (IOError, OSError):
706 706 return {}, nullid, nullrev
707 707
708 708 try:
709 709 last, lrev = lines.pop(0).split(" ", 1)
710 710 last, lrev = bin(last), int(lrev)
711 711 if lrev >= len(self) or self[lrev].node() != last:
712 712 # invalidate the cache
713 713 raise ValueError('invalidating branch cache (tip differs)')
714 714 for l in lines:
715 715 if not l:
716 716 continue
717 717 node, label = l.split(" ", 1)
718 718 label = encoding.tolocal(label.strip())
719 719 if not node in self:
720 720 raise ValueError('invalidating branch cache because node '+
721 721 '%s does not exist' % node)
722 722 partial.setdefault(label, []).append(bin(node))
723 723 except KeyboardInterrupt:
724 724 raise
725 725 except Exception, inst:
726 726 if self.ui.debugflag:
727 727 self.ui.warn(str(inst), '\n')
728 728 partial, last, lrev = {}, nullid, nullrev
729 729 return partial, last, lrev
730 730
731 @unfilteredmeth # Until we get a smarter cache management
731 @unfilteredmethod # Until we get a smarter cache management
732 732 def _writebranchcache(self, branches, tip, tiprev):
733 733 try:
734 734 f = self.opener("cache/branchheads", "w", atomictemp=True)
735 735 f.write("%s %s\n" % (hex(tip), tiprev))
736 736 for label, nodes in branches.iteritems():
737 737 for node in nodes:
738 738 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
739 739 f.close()
740 740 except (IOError, OSError):
741 741 pass
742 742
743 @unfilteredmeth # Until we get a smarter cache management
743 @unfilteredmethod # Until we get a smarter cache management
744 744 def _updatebranchcache(self, partial, ctxgen):
745 745 """Given a branchhead cache, partial, that may have extra nodes or be
746 746 missing heads, and a generator of nodes that are at least a superset of
747 747 heads missing, this function updates partial to be correct.
748 748 """
749 749 # collect new branch entries
750 750 newbranches = {}
751 751 for c in ctxgen:
752 752 newbranches.setdefault(c.branch(), []).append(c.node())
753 753 # if older branchheads are reachable from new ones, they aren't
754 754 # really branchheads. Note checking parents is insufficient:
755 755 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
756 756 for branch, newnodes in newbranches.iteritems():
757 757 bheads = partial.setdefault(branch, [])
758 758 # Remove candidate heads that no longer are in the repo (e.g., as
759 759 # the result of a strip that just happened). Avoid using 'node in
760 760 # self' here because that dives down into branchcache code somewhat
761 761 # recursively.
762 762 bheadrevs = [self.changelog.rev(node) for node in bheads
763 763 if self.changelog.hasnode(node)]
764 764 newheadrevs = [self.changelog.rev(node) for node in newnodes
765 765 if self.changelog.hasnode(node)]
766 766 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
767 767 # Remove duplicates - nodes that are in newheadrevs and are already
768 768 # in bheadrevs. This can happen if you strip a node whose parent
769 769 # was already a head (because they're on different branches).
770 770 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
771 771
772 772 # Starting from tip means fewer passes over reachable. If we know
773 773 # the new candidates are not ancestors of existing heads, we don't
774 774 # have to examine ancestors of existing heads
775 775 if ctxisnew:
776 776 iterrevs = sorted(newheadrevs)
777 777 else:
778 778 iterrevs = list(bheadrevs)
779 779
780 780 # This loop prunes out two kinds of heads - heads that are
781 781 # superseded by a head in newheadrevs, and newheadrevs that are not
782 782 # heads because an existing head is their descendant.
783 783 while iterrevs:
784 784 latest = iterrevs.pop()
785 785 if latest not in bheadrevs:
786 786 continue
787 787 ancestors = set(self.changelog.ancestors([latest],
788 788 bheadrevs[0]))
789 789 if ancestors:
790 790 bheadrevs = [b for b in bheadrevs if b not in ancestors]
791 791 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
792 792
793 793 # There may be branches that cease to exist when the last commit in the
794 794 # branch was stripped. This code filters them out. Note that the
795 795 # branch that ceased to exist may not be in newbranches because
796 796 # newbranches is the set of candidate heads, which when you strip the
797 797 # last commit in a branch will be the parent branch.
798 798 for branch in partial.keys():
799 799 nodes = [head for head in partial[branch]
800 800 if self.changelog.hasnode(head)]
801 801 if not nodes:
802 802 del partial[branch]
803 803
804 804 def lookup(self, key):
805 805 return self[key].node()
806 806
807 807 def lookupbranch(self, key, remote=None):
808 808 repo = remote or self
809 809 if key in repo.branchmap():
810 810 return key
811 811
812 812 repo = (remote and remote.local()) and remote or self
813 813 return repo[key].branch()
814 814
815 815 def known(self, nodes):
816 816 nm = self.changelog.nodemap
817 817 pc = self._phasecache
818 818 result = []
819 819 for n in nodes:
820 820 r = nm.get(n)
821 821 resp = not (r is None or pc.phase(self, r) >= phases.secret)
822 822 result.append(resp)
823 823 return result
824 824
825 825 def local(self):
826 826 return self
827 827
828 828 def cancopy(self):
829 829 return self.local() # so statichttprepo's override of local() works
830 830
831 831 def join(self, f):
832 832 return os.path.join(self.path, f)
833 833
834 834 def wjoin(self, f):
835 835 return os.path.join(self.root, f)
836 836
837 837 def file(self, f):
838 838 if f[0] == '/':
839 839 f = f[1:]
840 840 return filelog.filelog(self.sopener, f)
841 841
842 842 def changectx(self, changeid):
843 843 return self[changeid]
844 844
845 845 def parents(self, changeid=None):
846 846 '''get list of changectxs for parents of changeid'''
847 847 return self[changeid].parents()
848 848
849 849 def setparents(self, p1, p2=nullid):
850 850 copies = self.dirstate.setparents(p1, p2)
851 851 if copies:
852 852 # Adjust copy records, the dirstate cannot do it, it
853 853 # requires access to parents manifests. Preserve them
854 854 # only for entries added to first parent.
855 855 pctx = self[p1]
856 856 for f in copies:
857 857 if f not in pctx and copies[f] in pctx:
858 858 self.dirstate.copy(copies[f], f)
859 859
860 860 def filectx(self, path, changeid=None, fileid=None):
861 861 """changeid can be a changeset revision, node, or tag.
862 862 fileid can be a file revision or node."""
863 863 return context.filectx(self, path, changeid, fileid)
864 864
865 865 def getcwd(self):
866 866 return self.dirstate.getcwd()
867 867
868 868 def pathto(self, f, cwd=None):
869 869 return self.dirstate.pathto(f, cwd)
870 870
871 871 def wfile(self, f, mode='r'):
872 872 return self.wopener(f, mode)
873 873
874 874 def _link(self, f):
875 875 return os.path.islink(self.wjoin(f))
876 876
877 877 def _loadfilter(self, filter):
878 878 if filter not in self.filterpats:
879 879 l = []
880 880 for pat, cmd in self.ui.configitems(filter):
881 881 if cmd == '!':
882 882 continue
883 883 mf = matchmod.match(self.root, '', [pat])
884 884 fn = None
885 885 params = cmd
886 886 for name, filterfn in self._datafilters.iteritems():
887 887 if cmd.startswith(name):
888 888 fn = filterfn
889 889 params = cmd[len(name):].lstrip()
890 890 break
891 891 if not fn:
892 892 fn = lambda s, c, **kwargs: util.filter(s, c)
893 893 # Wrap old filters not supporting keyword arguments
894 894 if not inspect.getargspec(fn)[2]:
895 895 oldfn = fn
896 896 fn = lambda s, c, **kwargs: oldfn(s, c)
897 897 l.append((mf, fn, params))
898 898 self.filterpats[filter] = l
899 899 return self.filterpats[filter]
900 900
901 901 def _filter(self, filterpats, filename, data):
902 902 for mf, fn, cmd in filterpats:
903 903 if mf(filename):
904 904 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
905 905 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
906 906 break
907 907
908 908 return data
909 909
910 910 @unfilteredpropertycache
911 911 def _encodefilterpats(self):
912 912 return self._loadfilter('encode')
913 913
914 914 @unfilteredpropertycache
915 915 def _decodefilterpats(self):
916 916 return self._loadfilter('decode')
917 917
918 918 def adddatafilter(self, name, filter):
919 919 self._datafilters[name] = filter
920 920
921 921 def wread(self, filename):
922 922 if self._link(filename):
923 923 data = os.readlink(self.wjoin(filename))
924 924 else:
925 925 data = self.wopener.read(filename)
926 926 return self._filter(self._encodefilterpats, filename, data)
927 927
928 928 def wwrite(self, filename, data, flags):
929 929 data = self._filter(self._decodefilterpats, filename, data)
930 930 if 'l' in flags:
931 931 self.wopener.symlink(data, filename)
932 932 else:
933 933 self.wopener.write(filename, data)
934 934 if 'x' in flags:
935 935 util.setflags(self.wjoin(filename), False, True)
936 936
937 937 def wwritedata(self, filename, data):
938 938 return self._filter(self._decodefilterpats, filename, data)
939 939
940 940 def transaction(self, desc):
941 941 tr = self._transref and self._transref() or None
942 942 if tr and tr.running():
943 943 return tr.nest()
944 944
945 945 # abort here if the journal already exists
946 946 if os.path.exists(self.sjoin("journal")):
947 947 raise error.RepoError(
948 948 _("abandoned transaction found - run hg recover"))
949 949
950 950 self._writejournal(desc)
951 951 renames = [(x, undoname(x)) for x in self._journalfiles()]
952 952
953 953 tr = transaction.transaction(self.ui.warn, self.sopener,
954 954 self.sjoin("journal"),
955 955 aftertrans(renames),
956 956 self.store.createmode)
957 957 self._transref = weakref.ref(tr)
958 958 return tr
959 959
960 960 def _journalfiles(self):
961 961 return (self.sjoin('journal'), self.join('journal.dirstate'),
962 962 self.join('journal.branch'), self.join('journal.desc'),
963 963 self.join('journal.bookmarks'),
964 964 self.sjoin('journal.phaseroots'))
965 965
966 966 def undofiles(self):
967 967 return [undoname(x) for x in self._journalfiles()]
968 968
969 969 def _writejournal(self, desc):
970 970 self.opener.write("journal.dirstate",
971 971 self.opener.tryread("dirstate"))
972 972 self.opener.write("journal.branch",
973 973 encoding.fromlocal(self.dirstate.branch()))
974 974 self.opener.write("journal.desc",
975 975 "%d\n%s\n" % (len(self), desc))
976 976 self.opener.write("journal.bookmarks",
977 977 self.opener.tryread("bookmarks"))
978 978 self.sopener.write("journal.phaseroots",
979 979 self.sopener.tryread("phaseroots"))
980 980
981 981 def recover(self):
982 982 lock = self.lock()
983 983 try:
984 984 if os.path.exists(self.sjoin("journal")):
985 985 self.ui.status(_("rolling back interrupted transaction\n"))
986 986 transaction.rollback(self.sopener, self.sjoin("journal"),
987 987 self.ui.warn)
988 988 self.invalidate()
989 989 return True
990 990 else:
991 991 self.ui.warn(_("no interrupted transaction available\n"))
992 992 return False
993 993 finally:
994 994 lock.release()
995 995
996 996 def rollback(self, dryrun=False, force=False):
997 997 wlock = lock = None
998 998 try:
999 999 wlock = self.wlock()
1000 1000 lock = self.lock()
1001 1001 if os.path.exists(self.sjoin("undo")):
1002 1002 return self._rollback(dryrun, force)
1003 1003 else:
1004 1004 self.ui.warn(_("no rollback information available\n"))
1005 1005 return 1
1006 1006 finally:
1007 1007 release(lock, wlock)
1008 1008
1009 @unfilteredmeth # Until we get smarter cache management
1009 @unfilteredmethod # Until we get smarter cache management
1010 1010 def _rollback(self, dryrun, force):
1011 1011 ui = self.ui
1012 1012 try:
1013 1013 args = self.opener.read('undo.desc').splitlines()
1014 1014 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1015 1015 if len(args) >= 3:
1016 1016 detail = args[2]
1017 1017 oldtip = oldlen - 1
1018 1018
1019 1019 if detail and ui.verbose:
1020 1020 msg = (_('repository tip rolled back to revision %s'
1021 1021 ' (undo %s: %s)\n')
1022 1022 % (oldtip, desc, detail))
1023 1023 else:
1024 1024 msg = (_('repository tip rolled back to revision %s'
1025 1025 ' (undo %s)\n')
1026 1026 % (oldtip, desc))
1027 1027 except IOError:
1028 1028 msg = _('rolling back unknown transaction\n')
1029 1029 desc = None
1030 1030
1031 1031 if not force and self['.'] != self['tip'] and desc == 'commit':
1032 1032 raise util.Abort(
1033 1033 _('rollback of last commit while not checked out '
1034 1034 'may lose data'), hint=_('use -f to force'))
1035 1035
1036 1036 ui.status(msg)
1037 1037 if dryrun:
1038 1038 return 0
1039 1039
1040 1040 parents = self.dirstate.parents()
1041 1041 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
1042 1042 if os.path.exists(self.join('undo.bookmarks')):
1043 1043 util.rename(self.join('undo.bookmarks'),
1044 1044 self.join('bookmarks'))
1045 1045 if os.path.exists(self.sjoin('undo.phaseroots')):
1046 1046 util.rename(self.sjoin('undo.phaseroots'),
1047 1047 self.sjoin('phaseroots'))
1048 1048 self.invalidate()
1049 1049
1050 1050 # Discard all cache entries to force reloading everything.
1051 1051 self._filecache.clear()
1052 1052
1053 1053 parentgone = (parents[0] not in self.changelog.nodemap or
1054 1054 parents[1] not in self.changelog.nodemap)
1055 1055 if parentgone:
1056 1056 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1057 1057 try:
1058 1058 branch = self.opener.read('undo.branch')
1059 1059 self.dirstate.setbranch(encoding.tolocal(branch))
1060 1060 except IOError:
1061 1061 ui.warn(_('named branch could not be reset: '
1062 1062 'current branch is still \'%s\'\n')
1063 1063 % self.dirstate.branch())
1064 1064
1065 1065 self.dirstate.invalidate()
1066 1066 parents = tuple([p.rev() for p in self.parents()])
1067 1067 if len(parents) > 1:
1068 1068 ui.status(_('working directory now based on '
1069 1069 'revisions %d and %d\n') % parents)
1070 1070 else:
1071 1071 ui.status(_('working directory now based on '
1072 1072 'revision %d\n') % parents)
1073 1073 # TODO: if we know which new heads may result from this rollback, pass
1074 1074 # them to destroy(), which will prevent the branchhead cache from being
1075 1075 # invalidated.
1076 1076 self.destroyed()
1077 1077 return 0
1078 1078
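The parse at the top of `_rollback` above implies the layout of `.hg/undo.desc`: the pre-transaction changelog length on the first line, the transaction description on the second, and an optional detail line after that. A minimal stand-alone sketch of that parse (the helper name and the `repo_root` argument are illustrative, not part of Mercurial):

    import os

    def read_undo_desc(repo_root):
        # undo.desc layout, as read by _rollback above:
        #   "<old changelog length>\n<description>\n[<detail>\n]"
        path = os.path.join(repo_root, '.hg', 'undo.desc')
        with open(path) as fp:
            args = fp.read().splitlines()
        oldlen, desc = int(args[0]), args[1]
        detail = args[2] if len(args) >= 3 else None
        return oldlen, desc, detail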
1079 1079 def invalidatecaches(self):
1080 1080
1081 1081 if '_tagscache' in vars(self):
1082 1082 # can't use delattr on proxy
1083 1083 del self.__dict__['_tagscache']
1084 1084
1085 1085 self.unfiltered()._branchcache = None # in UTF-8
1086 1086 self.unfiltered()._branchcachetip = None
1087 1087 obsolete.clearobscaches(self)
1088 1088
1089 1089 def invalidatedirstate(self):
1090 1090 '''Invalidates the dirstate, causing the next call to dirstate
1091 1091 to check if it was modified since the last time it was read,
1092 1092 rereading it if it has.
1093 1093
1094 1094 This is different from dirstate.invalidate() in that it doesn't always
1095 1095 reread the dirstate. Use dirstate.invalidate() if you want to
1096 1096 explicitly read the dirstate again (i.e. restoring it to a previous
1097 1097 known good state).'''
1098 1098 if hasunfilteredcache(self, 'dirstate'):
1099 1099 for k in self.dirstate._filecache:
1100 1100 try:
1101 1101 delattr(self.dirstate, k)
1102 1102 except AttributeError:
1103 1103 pass
1104 1104 delattr(self.unfiltered(), 'dirstate')
1105 1105
1106 1106 def invalidate(self):
1107 1107 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
1108 1108 for k in self._filecache:
1109 1109 # dirstate is invalidated separately in invalidatedirstate()
1110 1110 if k == 'dirstate':
1111 1111 continue
1112 1112
1113 1113 try:
1114 1114 delattr(unfiltered, k)
1115 1115 except AttributeError:
1116 1116 pass
1117 1117 self.invalidatecaches()
1118 1118
1119 1119 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1120 1120 try:
1121 1121 l = lock.lock(lockname, 0, releasefn, desc=desc)
1122 1122 except error.LockHeld, inst:
1123 1123 if not wait:
1124 1124 raise
1125 1125 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1126 1126 (desc, inst.locker))
1127 1127 # default to 600 seconds timeout
1128 1128 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1129 1129 releasefn, desc=desc)
1130 1130 if acquirefn:
1131 1131 acquirefn()
1132 1132 return l
1133 1133
1134 1134 def _afterlock(self, callback):
1135 1135 """add a callback to the current repository lock.
1136 1136
1137 1137 The callback will be executed on lock release."""
1138 1138 l = self._lockref and self._lockref()
1139 1139 if l:
1140 1140 l.postrelease.append(callback)
1141 1141 else:
1142 1142 callback()
1143 1143
1144 1144 def lock(self, wait=True):
1145 1145 '''Lock the repository store (.hg/store) and return a weak reference
1146 1146 to the lock. Use this before modifying the store (e.g. committing or
1147 1147 stripping). If you are opening a transaction, get a lock as well.'''
1148 1148 l = self._lockref and self._lockref()
1149 1149 if l is not None and l.held:
1150 1150 l.lock()
1151 1151 return l
1152 1152
1153 1153 def unlock():
1154 1154 self.store.write()
1155 1155 if hasunfilteredcache(self, '_phasecache'):
1156 1156 self._phasecache.write()
1157 1157 for k, ce in self._filecache.items():
1158 1158 if k == 'dirstate':
1159 1159 continue
1160 1160 ce.refresh()
1161 1161
1162 1162 l = self._lock(self.sjoin("lock"), wait, unlock,
1163 1163 self.invalidate, _('repository %s') % self.origroot)
1164 1164 self._lockref = weakref.ref(l)
1165 1165 return l
1166 1166
1167 1167 def wlock(self, wait=True):
1168 1168 '''Lock the non-store parts of the repository (everything under
1169 1169 .hg except .hg/store) and return a weak reference to the lock.
1170 1170 Use this before modifying files in .hg.'''
1171 1171 l = self._wlockref and self._wlockref()
1172 1172 if l is not None and l.held:
1173 1173 l.lock()
1174 1174 return l
1175 1175
1176 1176 def unlock():
1177 1177 self.dirstate.write()
1178 1178 ce = self._filecache.get('dirstate')
1179 1179 if ce:
1180 1180 ce.refresh()
1181 1181
1182 1182 l = self._lock(self.join("wlock"), wait, unlock,
1183 1183 self.invalidatedirstate, _('working directory of %s') %
1184 1184 self.origroot)
1185 1185 self._wlockref = weakref.ref(l)
1186 1186 return l
1187 1187
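The two docstrings above describe the locking discipline: take `wlock()` before touching the working copy, take `lock()` before touching the store, and open a transaction while holding the store lock. A minimal sketch of that ordering; the entry point via `mercurial.hg.repository` and the transaction name are assumptions for illustration:

    from mercurial import ui as uimod, hg

    repo = hg.repository(uimod.ui(), '.')    # assumed: an existing local repo
    wlock = lock = tr = None
    try:
        wlock = repo.wlock()                 # protects .hg outside the store
        lock = repo.lock()                   # protects .hg/store
        tr = repo.transaction('example')
        # ... modify the store here ...
        tr.close()                           # commit the transaction
    finally:
        if tr:
            tr.release()                     # rolls back if close() never ran
        if lock:
            lock.release()
        if wlock:
            wlock.release()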
1188 1188 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1189 1189 """
1190 1190 commit an individual file as part of a larger transaction
1191 1191 """
1192 1192
1193 1193 fname = fctx.path()
1194 1194 text = fctx.data()
1195 1195 flog = self.file(fname)
1196 1196 fparent1 = manifest1.get(fname, nullid)
1197 1197 fparent2 = fparent2o = manifest2.get(fname, nullid)
1198 1198
1199 1199 meta = {}
1200 1200 copy = fctx.renamed()
1201 1201 if copy and copy[0] != fname:
1202 1202 # Mark the new revision of this file as a copy of another
1203 1203 # file. This copy data will effectively act as a parent
1204 1204 # of this new revision. If this is a merge, the first
1205 1205 # parent will be the nullid (meaning "look up the copy data")
1206 1206 # and the second one will be the other parent. For example:
1207 1207 #
1208 1208 # 0 --- 1 --- 3 rev1 changes file foo
1209 1209 # \ / rev2 renames foo to bar and changes it
1210 1210 # \- 2 -/ rev3 should have bar with all changes and
1211 1211 # should record that bar descends from
1212 1212 # bar in rev2 and foo in rev1
1213 1213 #
1214 1214 # this allows this merge to succeed:
1215 1215 #
1216 1216 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1217 1217 # \ / merging rev3 and rev4 should use bar@rev2
1218 1218 # \- 2 --- 4 as the merge base
1219 1219 #
1220 1220
1221 1221 cfname = copy[0]
1222 1222 crev = manifest1.get(cfname)
1223 1223 newfparent = fparent2
1224 1224
1225 1225 if manifest2: # branch merge
1226 1226 if fparent2 == nullid or crev is None: # copied on remote side
1227 1227 if cfname in manifest2:
1228 1228 crev = manifest2[cfname]
1229 1229 newfparent = fparent1
1230 1230
1231 1231 # find source in nearest ancestor if we've lost track
1232 1232 if not crev:
1233 1233 self.ui.debug(" %s: searching for copy revision for %s\n" %
1234 1234 (fname, cfname))
1235 1235 for ancestor in self[None].ancestors():
1236 1236 if cfname in ancestor:
1237 1237 crev = ancestor[cfname].filenode()
1238 1238 break
1239 1239
1240 1240 if crev:
1241 1241 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1242 1242 meta["copy"] = cfname
1243 1243 meta["copyrev"] = hex(crev)
1244 1244 fparent1, fparent2 = nullid, newfparent
1245 1245 else:
1246 1246 self.ui.warn(_("warning: can't find ancestor for '%s' "
1247 1247 "copied from '%s'!\n") % (fname, cfname))
1248 1248
1249 1249 elif fparent2 != nullid:
1250 1250 # is one parent an ancestor of the other?
1251 1251 fparentancestor = flog.ancestor(fparent1, fparent2)
1252 1252 if fparentancestor == fparent1:
1253 1253 fparent1, fparent2 = fparent2, nullid
1254 1254 elif fparentancestor == fparent2:
1255 1255 fparent2 = nullid
1256 1256
1257 1257 # is the file changed?
1258 1258 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1259 1259 changelist.append(fname)
1260 1260 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1261 1261
1262 1262 # are just the flags changed during merge?
1263 1263 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1264 1264 changelist.append(fname)
1265 1265
1266 1266 return fparent1
1267 1267
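As the long comment in `_filecommit` explains, a rename is stored as filelog metadata rather than as an ordinary parent link: the new filenode gets `copy`/`copyrev` keys and its first parent becomes nullid. A sketch of what that metadata dictionary looks like; the path and hash below are made up for illustration:

    # Illustrative only: the shape of the 'meta' dict built above when
    # fctx.renamed() reports a copy.
    meta = {
        'copy': 'foo',                                         # source path
        'copyrev': 'a2e4552cf73a0d4c8d0a1b2c3d4e5f60718293a4', # hex filenode of the source
    }
    # flog.add(text, meta, tr, linkrev, nullid, newfparent) then records the
    # new file revision with this copy information attached.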
1268 @unfilteredmeth
1268 @unfilteredmethod
1269 1269 def commit(self, text="", user=None, date=None, match=None, force=False,
1270 1270 editor=False, extra={}):
1271 1271 """Add a new revision to current repository.
1272 1272
1273 1273 Revision information is gathered from the working directory,
1274 1274 match can be used to filter the committed files. If editor is
1275 1275 supplied, it is called to get a commit message.
1276 1276 """
1277 1277
1278 1278 def fail(f, msg):
1279 1279 raise util.Abort('%s: %s' % (f, msg))
1280 1280
1281 1281 if not match:
1282 1282 match = matchmod.always(self.root, '')
1283 1283
1284 1284 if not force:
1285 1285 vdirs = []
1286 1286 match.dir = vdirs.append
1287 1287 match.bad = fail
1288 1288
1289 1289 wlock = self.wlock()
1290 1290 try:
1291 1291 wctx = self[None]
1292 1292 merge = len(wctx.parents()) > 1
1293 1293
1294 1294 if (not force and merge and match and
1295 1295 (match.files() or match.anypats())):
1296 1296 raise util.Abort(_('cannot partially commit a merge '
1297 1297 '(do not specify files or patterns)'))
1298 1298
1299 1299 changes = self.status(match=match, clean=force)
1300 1300 if force:
1301 1301 changes[0].extend(changes[6]) # mq may commit unchanged files
1302 1302
1303 1303 # check subrepos
1304 1304 subs = []
1305 1305 commitsubs = set()
1306 1306 newstate = wctx.substate.copy()
1307 1307 # only manage subrepos and .hgsubstate if .hgsub is present
1308 1308 if '.hgsub' in wctx:
1309 1309 # we'll decide whether to track this ourselves, thanks
1310 1310 if '.hgsubstate' in changes[0]:
1311 1311 changes[0].remove('.hgsubstate')
1312 1312 if '.hgsubstate' in changes[2]:
1313 1313 changes[2].remove('.hgsubstate')
1314 1314
1315 1315 # compare current state to last committed state
1316 1316 # build new substate based on last committed state
1317 1317 oldstate = wctx.p1().substate
1318 1318 for s in sorted(newstate.keys()):
1319 1319 if not match(s):
1320 1320 # ignore working copy, use old state if present
1321 1321 if s in oldstate:
1322 1322 newstate[s] = oldstate[s]
1323 1323 continue
1324 1324 if not force:
1325 1325 raise util.Abort(
1326 1326 _("commit with new subrepo %s excluded") % s)
1327 1327 if wctx.sub(s).dirty(True):
1328 1328 if not self.ui.configbool('ui', 'commitsubrepos'):
1329 1329 raise util.Abort(
1330 1330 _("uncommitted changes in subrepo %s") % s,
1331 1331 hint=_("use --subrepos for recursive commit"))
1332 1332 subs.append(s)
1333 1333 commitsubs.add(s)
1334 1334 else:
1335 1335 bs = wctx.sub(s).basestate()
1336 1336 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1337 1337 if oldstate.get(s, (None, None, None))[1] != bs:
1338 1338 subs.append(s)
1339 1339
1340 1340 # check for removed subrepos
1341 1341 for p in wctx.parents():
1342 1342 r = [s for s in p.substate if s not in newstate]
1343 1343 subs += [s for s in r if match(s)]
1344 1344 if subs:
1345 1345 if (not match('.hgsub') and
1346 1346 '.hgsub' in (wctx.modified() + wctx.added())):
1347 1347 raise util.Abort(
1348 1348 _("can't commit subrepos without .hgsub"))
1349 1349 changes[0].insert(0, '.hgsubstate')
1350 1350
1351 1351 elif '.hgsub' in changes[2]:
1352 1352 # clean up .hgsubstate when .hgsub is removed
1353 1353 if ('.hgsubstate' in wctx and
1354 1354 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1355 1355 changes[2].insert(0, '.hgsubstate')
1356 1356
1357 1357 # make sure all explicit patterns are matched
1358 1358 if not force and match.files():
1359 1359 matched = set(changes[0] + changes[1] + changes[2])
1360 1360
1361 1361 for f in match.files():
1362 1362 f = self.dirstate.normalize(f)
1363 1363 if f == '.' or f in matched or f in wctx.substate:
1364 1364 continue
1365 1365 if f in changes[3]: # missing
1366 1366 fail(f, _('file not found!'))
1367 1367 if f in vdirs: # visited directory
1368 1368 d = f + '/'
1369 1369 for mf in matched:
1370 1370 if mf.startswith(d):
1371 1371 break
1372 1372 else:
1373 1373 fail(f, _("no match under directory!"))
1374 1374 elif f not in self.dirstate:
1375 1375 fail(f, _("file not tracked!"))
1376 1376
1377 1377 if (not force and not extra.get("close") and not merge
1378 1378 and not (changes[0] or changes[1] or changes[2])
1379 1379 and wctx.branch() == wctx.p1().branch()):
1380 1380 return None
1381 1381
1382 1382 if merge and changes[3]:
1383 1383 raise util.Abort(_("cannot commit merge with missing files"))
1384 1384
1385 1385 ms = mergemod.mergestate(self)
1386 1386 for f in changes[0]:
1387 1387 if f in ms and ms[f] == 'u':
1388 1388 raise util.Abort(_("unresolved merge conflicts "
1389 1389 "(see hg help resolve)"))
1390 1390
1391 1391 cctx = context.workingctx(self, text, user, date, extra, changes)
1392 1392 if editor:
1393 1393 cctx._text = editor(self, cctx, subs)
1394 1394 edited = (text != cctx._text)
1395 1395
1396 1396 # commit subs and write new state
1397 1397 if subs:
1398 1398 for s in sorted(commitsubs):
1399 1399 sub = wctx.sub(s)
1400 1400 self.ui.status(_('committing subrepository %s\n') %
1401 1401 subrepo.subrelpath(sub))
1402 1402 sr = sub.commit(cctx._text, user, date)
1403 1403 newstate[s] = (newstate[s][0], sr)
1404 1404 subrepo.writestate(self, newstate)
1405 1405
1406 1406 # Save commit message in case this transaction gets rolled back
1407 1407 # (e.g. by a pretxncommit hook). Leave the content alone on
1408 1408 # the assumption that the user will use the same editor again.
1409 1409 msgfn = self.savecommitmessage(cctx._text)
1410 1410
1411 1411 p1, p2 = self.dirstate.parents()
1412 1412 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1413 1413 try:
1414 1414 self.hook("precommit", throw=True, parent1=hookp1,
1415 1415 parent2=hookp2)
1416 1416 ret = self.commitctx(cctx, True)
1417 1417 except: # re-raises
1418 1418 if edited:
1419 1419 self.ui.write(
1420 1420 _('note: commit message saved in %s\n') % msgfn)
1421 1421 raise
1422 1422
1423 1423 # update bookmarks, dirstate and mergestate
1424 1424 bookmarks.update(self, [p1, p2], ret)
1425 1425 for f in changes[0] + changes[1]:
1426 1426 self.dirstate.normal(f)
1427 1427 for f in changes[2]:
1428 1428 self.dirstate.drop(f)
1429 1429 self.dirstate.setparents(ret)
1430 1430 ms.reset()
1431 1431 finally:
1432 1432 wlock.release()
1433 1433
1434 1434 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1435 1435 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1436 1436 self._afterlock(commithook)
1437 1437 return ret
1438 1438
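`commit()` above gathers the revision from the working directory, optionally narrowed by `match`, and returns the new changeset node, or None when there is nothing to commit on the same branch. A small calling sketch; `repo` is assumed to be an already-opened localrepository instance:

    from mercurial.node import hex

    node = repo.commit(text='example commit',
                       user='Example Editor <editor@example.com>')
    if node is None:
        repo.ui.status('nothing changed\n')
    else:
        repo.ui.status('created changeset %s\n' % hex(node))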
1439 @unfilteredmeth
1439 @unfilteredmethod
1440 1440 def commitctx(self, ctx, error=False):
1441 1441 """Add a new revision to current repository.
1442 1442 Revision information is passed via the context argument.
1443 1443 """
1444 1444
1445 1445 tr = lock = None
1446 1446 removed = list(ctx.removed())
1447 1447 p1, p2 = ctx.p1(), ctx.p2()
1448 1448 user = ctx.user()
1449 1449
1450 1450 lock = self.lock()
1451 1451 try:
1452 1452 tr = self.transaction("commit")
1453 1453 trp = weakref.proxy(tr)
1454 1454
1455 1455 if ctx.files():
1456 1456 m1 = p1.manifest().copy()
1457 1457 m2 = p2.manifest()
1458 1458
1459 1459 # check in files
1460 1460 new = {}
1461 1461 changed = []
1462 1462 linkrev = len(self)
1463 1463 for f in sorted(ctx.modified() + ctx.added()):
1464 1464 self.ui.note(f + "\n")
1465 1465 try:
1466 1466 fctx = ctx[f]
1467 1467 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1468 1468 changed)
1469 1469 m1.set(f, fctx.flags())
1470 1470 except OSError, inst:
1471 1471 self.ui.warn(_("trouble committing %s!\n") % f)
1472 1472 raise
1473 1473 except IOError, inst:
1474 1474 errcode = getattr(inst, 'errno', errno.ENOENT)
1475 1475 if error or errcode and errcode != errno.ENOENT:
1476 1476 self.ui.warn(_("trouble committing %s!\n") % f)
1477 1477 raise
1478 1478 else:
1479 1479 removed.append(f)
1480 1480
1481 1481 # update manifest
1482 1482 m1.update(new)
1483 1483 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1484 1484 drop = [f for f in removed if f in m1]
1485 1485 for f in drop:
1486 1486 del m1[f]
1487 1487 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1488 1488 p2.manifestnode(), (new, drop))
1489 1489 files = changed + removed
1490 1490 else:
1491 1491 mn = p1.manifestnode()
1492 1492 files = []
1493 1493
1494 1494 # update changelog
1495 1495 self.changelog.delayupdate()
1496 1496 n = self.changelog.add(mn, files, ctx.description(),
1497 1497 trp, p1.node(), p2.node(),
1498 1498 user, ctx.date(), ctx.extra().copy())
1499 1499 p = lambda: self.changelog.writepending() and self.root or ""
1500 1500 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1501 1501 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1502 1502 parent2=xp2, pending=p)
1503 1503 self.changelog.finalize(trp)
1504 1504 # set the new commit in its proper phase
1505 1505 targetphase = phases.newcommitphase(self.ui)
1506 1506 if targetphase:
1507 1507 # retracting the boundary does not alter parent changesets.
1508 1508 # if a parent has a higher phase, the resulting phase will
1509 1509 # be compliant anyway
1510 1510 #
1511 1511 # if minimal phase was 0 we don't need to retract anything
1512 1512 phases.retractboundary(self, targetphase, [n])
1513 1513 tr.close()
1514 1514 self.updatebranchcache()
1515 1515 return n
1516 1516 finally:
1517 1517 if tr:
1518 1518 tr.release()
1519 1519 lock.release()
1520 1520
1521 @unfilteredmeth
1521 @unfilteredmethod
1522 1522 def destroyed(self, newheadnodes=None):
1523 1523 '''Inform the repository that nodes have been destroyed.
1524 1524 Intended for use by strip and rollback, so there's a common
1525 1525 place for anything that has to be done after destroying history.
1526 1526
1527 1527 If you know the branchhead cache was up to date before nodes were removed
1528 1528 and you also know the set of candidate new heads that may have resulted
1529 1529 from the destruction, you can set newheadnodes. This will enable the
1530 1530 code to update the branchheads cache, rather than having future code
1531 1531 decide it's invalid and regenerate it from scratch.
1532 1532 '''
1533 1533 # If we have info, newheadnodes, on how to update the branch cache, do
1534 1534 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1535 1535 # will be caught the next time it is read.
1536 1536 if newheadnodes:
1537 1537 tiprev = len(self) - 1
1538 1538 ctxgen = (self[node] for node in newheadnodes
1539 1539 if self.changelog.hasnode(node))
1540 1540 self._updatebranchcache(self._branchcache, ctxgen)
1541 1541 self._writebranchcache(self._branchcache, self.changelog.tip(),
1542 1542 tiprev)
1543 1543
1544 1544 # Ensure the persistent tag cache is updated. Doing it now
1545 1545 # means that the tag cache only has to worry about destroyed
1546 1546 # heads immediately after a strip/rollback. That in turn
1547 1547 # guarantees that "cachetip == currenttip" (comparing both rev
1548 1548 # and node) always means no nodes have been added or destroyed.
1549 1549
1550 1550 # XXX this is suboptimal when qrefresh'ing: we strip the current
1551 1551 # head, refresh the tag cache, then immediately add a new head.
1552 1552 # But I think doing it this way is necessary for the "instant
1553 1553 # tag cache retrieval" case to work.
1554 1554 self.invalidatecaches()
1555 1555
1556 1556 # Discard all cache entries to force reloading everything.
1557 1557 self._filecache.clear()
1558 1558
1559 1559 def walk(self, match, node=None):
1560 1560 '''
1561 1561 walk recursively through the directory tree or a given
1562 1562 changeset, finding all files matched by the match
1563 1563 function
1564 1564 '''
1565 1565 return self[node].walk(match)
1566 1566
1567 1567 def status(self, node1='.', node2=None, match=None,
1568 1568 ignored=False, clean=False, unknown=False,
1569 1569 listsubrepos=False):
1570 1570 """return status of files between two nodes or node and working
1571 1571 directory.
1572 1572
1573 1573 If node1 is None, use the first dirstate parent instead.
1574 1574 If node2 is None, compare node1 with working directory.
1575 1575 """
1576 1576
1577 1577 def mfmatches(ctx):
1578 1578 mf = ctx.manifest().copy()
1579 1579 if match.always():
1580 1580 return mf
1581 1581 for fn in mf.keys():
1582 1582 if not match(fn):
1583 1583 del mf[fn]
1584 1584 return mf
1585 1585
1586 1586 if isinstance(node1, context.changectx):
1587 1587 ctx1 = node1
1588 1588 else:
1589 1589 ctx1 = self[node1]
1590 1590 if isinstance(node2, context.changectx):
1591 1591 ctx2 = node2
1592 1592 else:
1593 1593 ctx2 = self[node2]
1594 1594
1595 1595 working = ctx2.rev() is None
1596 1596 parentworking = working and ctx1 == self['.']
1597 1597 match = match or matchmod.always(self.root, self.getcwd())
1598 1598 listignored, listclean, listunknown = ignored, clean, unknown
1599 1599
1600 1600 # load earliest manifest first for caching reasons
1601 1601 if not working and ctx2.rev() < ctx1.rev():
1602 1602 ctx2.manifest()
1603 1603
1604 1604 if not parentworking:
1605 1605 def bad(f, msg):
1606 1606 # 'f' may be a directory pattern from 'match.files()',
1607 1607 # so 'f not in ctx1' is not enough
1608 1608 if f not in ctx1 and f not in ctx1.dirs():
1609 1609 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1610 1610 match.bad = bad
1611 1611
1612 1612 if working: # we need to scan the working dir
1613 1613 subrepos = []
1614 1614 if '.hgsub' in self.dirstate:
1615 1615 subrepos = ctx2.substate.keys()
1616 1616 s = self.dirstate.status(match, subrepos, listignored,
1617 1617 listclean, listunknown)
1618 1618 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1619 1619
1620 1620 # check for any possibly clean files
1621 1621 if parentworking and cmp:
1622 1622 fixup = []
1623 1623 # do a full compare of any files that might have changed
1624 1624 for f in sorted(cmp):
1625 1625 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1626 1626 or ctx1[f].cmp(ctx2[f])):
1627 1627 modified.append(f)
1628 1628 else:
1629 1629 fixup.append(f)
1630 1630
1631 1631 # update dirstate for files that are actually clean
1632 1632 if fixup:
1633 1633 if listclean:
1634 1634 clean += fixup
1635 1635
1636 1636 try:
1637 1637 # updating the dirstate is optional
1638 1638 # so we don't wait on the lock
1639 1639 wlock = self.wlock(False)
1640 1640 try:
1641 1641 for f in fixup:
1642 1642 self.dirstate.normal(f)
1643 1643 finally:
1644 1644 wlock.release()
1645 1645 except error.LockError:
1646 1646 pass
1647 1647
1648 1648 if not parentworking:
1649 1649 mf1 = mfmatches(ctx1)
1650 1650 if working:
1651 1651 # we are comparing working dir against non-parent
1652 1652 # generate a pseudo-manifest for the working dir
1653 1653 mf2 = mfmatches(self['.'])
1654 1654 for f in cmp + modified + added:
1655 1655 mf2[f] = None
1656 1656 mf2.set(f, ctx2.flags(f))
1657 1657 for f in removed:
1658 1658 if f in mf2:
1659 1659 del mf2[f]
1660 1660 else:
1661 1661 # we are comparing two revisions
1662 1662 deleted, unknown, ignored = [], [], []
1663 1663 mf2 = mfmatches(ctx2)
1664 1664
1665 1665 modified, added, clean = [], [], []
1666 1666 withflags = mf1.withflags() | mf2.withflags()
1667 1667 for fn in mf2:
1668 1668 if fn in mf1:
1669 1669 if (fn not in deleted and
1670 1670 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1671 1671 (mf1[fn] != mf2[fn] and
1672 1672 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1673 1673 modified.append(fn)
1674 1674 elif listclean:
1675 1675 clean.append(fn)
1676 1676 del mf1[fn]
1677 1677 elif fn not in deleted:
1678 1678 added.append(fn)
1679 1679 removed = mf1.keys()
1680 1680
1681 1681 if working and modified and not self.dirstate._checklink:
1682 1682 # Symlink placeholders may get non-symlink-like contents
1683 1683 # via user error or dereferencing by NFS or Samba servers,
1684 1684 # so we filter out any placeholders that don't look like a
1685 1685 # symlink
1686 1686 sane = []
1687 1687 for f in modified:
1688 1688 if ctx2.flags(f) == 'l':
1689 1689 d = ctx2[f].data()
1690 1690 if len(d) >= 1024 or '\n' in d or util.binary(d):
1691 1691 self.ui.debug('ignoring suspect symlink placeholder'
1692 1692 ' "%s"\n' % f)
1693 1693 continue
1694 1694 sane.append(f)
1695 1695 modified = sane
1696 1696
1697 1697 r = modified, added, removed, deleted, unknown, ignored, clean
1698 1698
1699 1699 if listsubrepos:
1700 1700 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1701 1701 if working:
1702 1702 rev2 = None
1703 1703 else:
1704 1704 rev2 = ctx2.substate[subpath][1]
1705 1705 try:
1706 1706 submatch = matchmod.narrowmatcher(subpath, match)
1707 1707 s = sub.status(rev2, match=submatch, ignored=listignored,
1708 1708 clean=listclean, unknown=listunknown,
1709 1709 listsubrepos=True)
1710 1710 for rfiles, sfiles in zip(r, s):
1711 1711 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1712 1712 except error.LookupError:
1713 1713 self.ui.status(_("skipping missing subrepository: %s\n")
1714 1714 % subpath)
1715 1715
1716 1716 for l in r:
1717 1717 l.sort()
1718 1718 return r
1719 1719
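`status()` returns seven lists in a fixed order, assembled above as modified, added, removed, deleted, unknown, ignored, clean. A sketch of unpacking that result (the one-letter labels follow the usual `hg status` codes); `repo` is again an assumed localrepository instance:

    st = repo.status(ignored=True, clean=True, unknown=True)
    modified, added, removed, deleted, unknown, ignored, clean = st
    labels = ('M', 'A', 'R', '!', '?', 'I', 'C')
    for label, files in zip(labels, st):
        for f in files:
            repo.ui.write('%s %s\n' % (label, f))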
1720 1720 def heads(self, start=None):
1721 1721 heads = self.changelog.heads(start)
1722 1722 # sort the output in rev descending order
1723 1723 return sorted(heads, key=self.changelog.rev, reverse=True)
1724 1724
1725 1725 def branchheads(self, branch=None, start=None, closed=False):
1726 1726 '''return a (possibly filtered) list of heads for the given branch
1727 1727
1728 1728 Heads are returned in topological order, from newest to oldest.
1729 1729 If branch is None, use the dirstate branch.
1730 1730 If start is not None, return only heads reachable from start.
1731 1731 If closed is True, return heads that are marked as closed as well.
1732 1732 '''
1733 1733 if branch is None:
1734 1734 branch = self[None].branch()
1735 1735 branches = self.branchmap()
1736 1736 if branch not in branches:
1737 1737 return []
1738 1738 # the cache returns heads ordered lowest to highest
1739 1739 bheads = list(reversed(branches[branch]))
1740 1740 if start is not None:
1741 1741 # filter out the heads that cannot be reached from startrev
1742 1742 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1743 1743 bheads = [h for h in bheads if h in fbheads]
1744 1744 if not closed:
1745 1745 bheads = [h for h in bheads if not self[h].closesbranch()]
1746 1746 return bheads
1747 1747
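Per its docstring, `branchheads()` returns heads newest-first and can filter by reachability from `start` and by closed state. A quick sketch listing the open heads of the default branch; `repo` is an assumed localrepository instance:

    from mercurial.node import short

    for h in repo.branchheads('default', closed=False):
        repo.ui.write('%s\n' % short(h))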
1748 1748 def branches(self, nodes):
1749 1749 if not nodes:
1750 1750 nodes = [self.changelog.tip()]
1751 1751 b = []
1752 1752 for n in nodes:
1753 1753 t = n
1754 1754 while True:
1755 1755 p = self.changelog.parents(n)
1756 1756 if p[1] != nullid or p[0] == nullid:
1757 1757 b.append((t, n, p[0], p[1]))
1758 1758 break
1759 1759 n = p[0]
1760 1760 return b
1761 1761
1762 1762 def between(self, pairs):
1763 1763 r = []
1764 1764
1765 1765 for top, bottom in pairs:
1766 1766 n, l, i = top, [], 0
1767 1767 f = 1
1768 1768
1769 1769 while n != bottom and n != nullid:
1770 1770 p = self.changelog.parents(n)[0]
1771 1771 if i == f:
1772 1772 l.append(n)
1773 1773 f = f * 2
1774 1774 n = p
1775 1775 i += 1
1776 1776
1777 1777 r.append(l)
1778 1778
1779 1779 return r
1780 1780
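`between()` walks first parents from each `top` down toward `bottom` and keeps the nodes it passes at exponentially growing distances (1, 2, 4, 8, ...), which is the sampling the older discovery protocol relies on. A pure-Python illustration of just the sampling pattern (distances rather than real nodes):

    def sample_distances(length):
        # Mirrors the i/f bookkeeping in between() above: keep every
        # position whose distance from 'top' is a power of two.
        kept, i, f = [], 0, 1
        while i < length:        # stand-in for "n != bottom and n != nullid"
            if i == f:
                kept.append(i)
                f *= 2
            i += 1               # stand-in for stepping to the first parent
        return kept

    print(sample_distances(20))  # -> [1, 2, 4, 8, 16]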
1781 1781 def pull(self, remote, heads=None, force=False):
1782 1782 # don't open a transaction for nothing or you break future useful
1783 1783 # rollback calls
1784 1784 tr = None
1785 1785 trname = 'pull\n' + util.hidepassword(remote.url())
1786 1786 lock = self.lock()
1787 1787 try:
1788 1788 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1789 1789 force=force)
1790 1790 common, fetch, rheads = tmp
1791 1791 if not fetch:
1792 1792 self.ui.status(_("no changes found\n"))
1793 1793 added = []
1794 1794 result = 0
1795 1795 else:
1796 1796 tr = self.transaction(trname)
1797 1797 if heads is None and list(common) == [nullid]:
1798 1798 self.ui.status(_("requesting all changes\n"))
1799 1799 elif heads is None and remote.capable('changegroupsubset'):
1800 1800 # issue1320, avoid a race if remote changed after discovery
1801 1801 heads = rheads
1802 1802
1803 1803 if remote.capable('getbundle'):
1804 1804 cg = remote.getbundle('pull', common=common,
1805 1805 heads=heads or rheads)
1806 1806 elif heads is None:
1807 1807 cg = remote.changegroup(fetch, 'pull')
1808 1808 elif not remote.capable('changegroupsubset'):
1809 1809 raise util.Abort(_("partial pull cannot be done because "
1810 1810 "other repository doesn't support "
1811 1811 "changegroupsubset."))
1812 1812 else:
1813 1813 cg = remote.changegroupsubset(fetch, heads, 'pull')
1814 1814 clstart = len(self.changelog)
1815 1815 result = self.addchangegroup(cg, 'pull', remote.url())
1816 1816 clend = len(self.changelog)
1817 1817 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1818 1818
1819 1819 # compute target subset
1820 1820 if heads is None:
1821 1821 # We pulled everything possible
1822 1822 # sync on everything common
1823 1823 subset = common + added
1824 1824 else:
1825 1825 # We pulled a specific subset
1826 1826 # sync on this subset
1827 1827 subset = heads
1828 1828
1829 1829 # Get remote phases data from remote
1830 1830 remotephases = remote.listkeys('phases')
1831 1831 publishing = bool(remotephases.get('publishing', False))
1832 1832 if remotephases and not publishing:
1833 1833 # remote is new and non-publishing
1834 1834 pheads, _dr = phases.analyzeremotephases(self, subset,
1835 1835 remotephases)
1836 1836 phases.advanceboundary(self, phases.public, pheads)
1837 1837 phases.advanceboundary(self, phases.draft, subset)
1838 1838 else:
1839 1839 # Remote is old or publishing; all common changesets
1840 1840 # should be seen as public
1841 1841 phases.advanceboundary(self, phases.public, subset)
1842 1842
1843 1843 if obsolete._enabled:
1844 1844 self.ui.debug('fetching remote obsolete markers\n')
1845 1845 remoteobs = remote.listkeys('obsolete')
1846 1846 if 'dump0' in remoteobs:
1847 1847 if tr is None:
1848 1848 tr = self.transaction(trname)
1849 1849 for key in sorted(remoteobs, reverse=True):
1850 1850 if key.startswith('dump'):
1851 1851 data = base85.b85decode(remoteobs[key])
1852 1852 self.obsstore.mergemarkers(tr, data)
1853 1853 if tr is not None:
1854 1854 tr.close()
1855 1855 finally:
1856 1856 if tr is not None:
1857 1857 tr.release()
1858 1858 lock.release()
1859 1859
1860 1860 return result
1861 1861
1862 1862 def checkpush(self, force, revs):
1863 1863 """Extensions can override this function if additional checks have
1864 1864 to be performed before pushing, or call it if they override push
1865 1865 command.
1866 1866 """
1867 1867 pass
1868 1868
1869 1869 def push(self, remote, force=False, revs=None, newbranch=False):
1870 1870 '''Push outgoing changesets (limited by revs) from the current
1871 1871 repository to remote. Return an integer:
1872 1872 - None means nothing to push
1873 1873 - 0 means HTTP error
1874 1874 - 1 means we pushed and remote head count is unchanged *or*
1875 1875 we have outgoing changesets but refused to push
1876 1876 - other values as described by addchangegroup()
1877 1877 '''
1878 1878 # there are two ways to push to remote repo:
1879 1879 #
1880 1880 # addchangegroup assumes local user can lock remote
1881 1881 # repo (local filesystem, old ssh servers).
1882 1882 #
1883 1883 # unbundle assumes local user cannot lock remote repo (new ssh
1884 1884 # servers, http servers).
1885 1885
1886 1886 if not remote.canpush():
1887 1887 raise util.Abort(_("destination does not support push"))
1888 1888 # get local lock as we might write phase data
1889 1889 unfi = self.unfiltered()
1890 1890 locallock = self.lock()
1891 1891 try:
1892 1892 self.checkpush(force, revs)
1893 1893 lock = None
1894 1894 unbundle = remote.capable('unbundle')
1895 1895 if not unbundle:
1896 1896 lock = remote.lock()
1897 1897 try:
1898 1898 # discovery
1899 1899 fci = discovery.findcommonincoming
1900 1900 commoninc = fci(unfi, remote, force=force)
1901 1901 common, inc, remoteheads = commoninc
1902 1902 fco = discovery.findcommonoutgoing
1903 1903 outgoing = fco(unfi, remote, onlyheads=revs,
1904 1904 commoninc=commoninc, force=force)
1905 1905
1906 1906
1907 1907 if not outgoing.missing:
1908 1908 # nothing to push
1909 1909 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1910 1910 ret = None
1911 1911 else:
1912 1912 # something to push
1913 1913 if not force:
1914 1914 # if self.obsstore is empty --> no obsolete markers,
1915 1915 # so we can skip the iteration entirely
1916 1916 if unfi.obsstore:
1917 1917 # these messages are defined here because of the 80-char limit
1918 1918 mso = _("push includes obsolete changeset: %s!")
1919 1919 msu = _("push includes unstable changeset: %s!")
1920 1920 msb = _("push includes bumped changeset: %s!")
1921 1921 # If we are to push and there is at least one
1922 1922 # obsolete or unstable changeset in missing, at
1923 1923 # least one of the missing heads will be obsolete or
1924 1924 # unstable. So checking heads only is ok
1925 1925 for node in outgoing.missingheads:
1926 1926 ctx = unfi[node]
1927 1927 if ctx.obsolete():
1928 1928 raise util.Abort(mso % ctx)
1929 1929 elif ctx.unstable():
1930 1930 raise util.Abort(msu % ctx)
1931 1931 elif ctx.bumped():
1932 1932 raise util.Abort(msb % ctx)
1933 1933 discovery.checkheads(unfi, remote, outgoing,
1934 1934 remoteheads, newbranch,
1935 1935 bool(inc))
1936 1936
1937 1937 # create a changegroup from local
1938 1938 if revs is None and not outgoing.excluded:
1939 1939 # push everything,
1940 1940 # use the fast path, no race possible on push
1941 1941 cg = self._changegroup(outgoing.missing, 'push')
1942 1942 else:
1943 1943 cg = self.getlocalbundle('push', outgoing)
1944 1944
1945 1945 # apply changegroup to remote
1946 1946 if unbundle:
1947 1947 # local repo finds heads on server, finds out what
1948 1948 # revs it must push. once revs transferred, if server
1949 1949 # finds it has different heads (someone else won
1950 1950 # commit/push race), server aborts.
1951 1951 if force:
1952 1952 remoteheads = ['force']
1953 1953 # ssh: return remote's addchangegroup()
1954 1954 # http: return remote's addchangegroup() or 0 for error
1955 1955 ret = remote.unbundle(cg, remoteheads, 'push')
1956 1956 else:
1957 1957 # we return an integer indicating remote head count
1958 1958 # change
1959 1959 ret = remote.addchangegroup(cg, 'push', self.url())
1960 1960
1961 1961 if ret:
1962 1962 # push succeed, synchronize target of the push
1963 1963 cheads = outgoing.missingheads
1964 1964 elif revs is None:
1965 1965 # All-out push failed. Synchronize all common
1966 1966 cheads = outgoing.commonheads
1967 1967 else:
1968 1968 # I want cheads = heads(::missingheads and ::commonheads)
1969 1969 # (missingheads is revs with secret changeset filtered out)
1970 1970 #
1971 1971 # This can be expressed as:
1972 1972 # cheads = ( (missingheads and ::commonheads)
1973 1973 # + (commonheads and ::missingheads))
1974 1974 # )
1975 1975 #
1976 1976 # while trying to push we already computed the following:
1977 1977 # common = (::commonheads)
1978 1978 # missing = ((commonheads::missingheads) - commonheads)
1979 1979 #
1980 1980 # We can pick:
1981 1981 # * missingheads part of common (::commonheads)
1982 1982 common = set(outgoing.common)
1983 1983 cheads = [node for node in revs if node in common]
1984 1984 # and
1985 1985 # * commonheads parents on missing
1986 1986 revset = unfi.set('%ln and parents(roots(%ln))',
1987 1987 outgoing.commonheads,
1988 1988 outgoing.missing)
1989 1989 cheads.extend(c.node() for c in revset)
1990 1990 # even when we don't push, exchanging phase data is useful
1991 1991 remotephases = remote.listkeys('phases')
1992 1992 if not remotephases: # old server or public only repo
1993 1993 phases.advanceboundary(self, phases.public, cheads)
1994 1994 # don't push any phase data as there is nothing to push
1995 1995 else:
1996 1996 ana = phases.analyzeremotephases(self, cheads, remotephases)
1997 1997 pheads, droots = ana
1998 1998 ### Apply remote phase on local
1999 1999 if remotephases.get('publishing', False):
2000 2000 phases.advanceboundary(self, phases.public, cheads)
2001 2001 else: # publish = False
2002 2002 phases.advanceboundary(self, phases.public, pheads)
2003 2003 phases.advanceboundary(self, phases.draft, cheads)
2004 2004 ### Apply local phase on remote
2005 2005
2006 2006 # Get the list of all revs that are draft on remote but public here.
2007 2007 # XXX Beware that the revset breaks if droots is not strictly
2008 2008 # XXX roots; we may want to ensure it is, but that is costly
2009 2009 outdated = unfi.set('heads((%ln::%ln) and public())',
2010 2010 droots, cheads)
2011 2011 for newremotehead in outdated:
2012 2012 r = remote.pushkey('phases',
2013 2013 newremotehead.hex(),
2014 2014 str(phases.draft),
2015 2015 str(phases.public))
2016 2016 if not r:
2017 2017 self.ui.warn(_('updating %s to public failed!\n')
2018 2018 % newremotehead)
2019 2019 self.ui.debug('try to push obsolete markers to remote\n')
2020 2020 if (obsolete._enabled and self.obsstore and
2021 2021 'obsolete' in remote.listkeys('namespaces')):
2022 2022 rslts = []
2023 2023 remotedata = self.listkeys('obsolete')
2024 2024 for key in sorted(remotedata, reverse=True):
2025 2025 # reverse sort to ensure we end with dump0
2026 2026 data = remotedata[key]
2027 2027 rslts.append(remote.pushkey('obsolete', key, '', data))
2028 2028 if [r for r in rslts if not r]:
2029 2029 msg = _('failed to push some obsolete markers!\n')
2030 2030 self.ui.warn(msg)
2031 2031 finally:
2032 2032 if lock is not None:
2033 2033 lock.release()
2034 2034 finally:
2035 2035 locallock.release()
2036 2036
2037 2037 self.ui.debug("checking for updated bookmarks\n")
2038 2038 rb = remote.listkeys('bookmarks')
2039 2039 for k in rb.keys():
2040 2040 if k in unfi._bookmarks:
2041 2041 nr, nl = rb[k], hex(self._bookmarks[k])
2042 2042 if nr in unfi:
2043 2043 cr = unfi[nr]
2044 2044 cl = unfi[nl]
2045 2045 if bookmarks.validdest(unfi, cr, cl):
2046 2046 r = remote.pushkey('bookmarks', k, nr, nl)
2047 2047 if r:
2048 2048 self.ui.status(_("updating bookmark %s\n") % k)
2049 2049 else:
2050 2050 self.ui.warn(_('updating bookmark %s'
2051 2051 ' failed!\n') % k)
2052 2052
2053 2053 return ret
2054 2054
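The docstring at the top of `push()` spells out its return convention. A small sketch interpreting it; `repo` and `other` (a peer repository) are assumed to have been opened elsewhere:

    ret = repo.push(other, force=False, revs=None, newbranch=False)
    if ret is None:
        repo.ui.status('nothing to push\n')
    elif ret == 0:
        repo.ui.warn('push failed (HTTP error)\n')
    else:
        # 1 means the remote head count is unchanged, or we refused to push;
        # other values follow the addchangegroup() convention further below.
        repo.ui.status('push finished with code %d\n' % ret)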
2055 2055 def changegroupinfo(self, nodes, source):
2056 2056 if self.ui.verbose or source == 'bundle':
2057 2057 self.ui.status(_("%d changesets found\n") % len(nodes))
2058 2058 if self.ui.debugflag:
2059 2059 self.ui.debug("list of changesets:\n")
2060 2060 for node in nodes:
2061 2061 self.ui.debug("%s\n" % hex(node))
2062 2062
2063 2063 def changegroupsubset(self, bases, heads, source):
2064 2064 """Compute a changegroup consisting of all the nodes that are
2065 2065 descendants of any of the bases and ancestors of any of the heads.
2066 2066 Return a chunkbuffer object whose read() method will return
2067 2067 successive changegroup chunks.
2068 2068
2069 2069 It is fairly complex as determining which filenodes and which
2070 2070 manifest nodes need to be included for the changeset to be complete
2071 2071 is non-trivial.
2072 2072
2073 2073 Another wrinkle is doing the reverse, figuring out which changeset in
2074 2074 the changegroup a particular filenode or manifestnode belongs to.
2075 2075 """
2076 2076 cl = self.changelog
2077 2077 if not bases:
2078 2078 bases = [nullid]
2079 2079 csets, bases, heads = cl.nodesbetween(bases, heads)
2080 2080 # We assume that all ancestors of bases are known
2081 2081 common = set(cl.ancestors([cl.rev(n) for n in bases]))
2082 2082 return self._changegroupsubset(common, csets, heads, source)
2083 2083
2084 2084 def getlocalbundle(self, source, outgoing):
2085 2085 """Like getbundle, but taking a discovery.outgoing as an argument.
2086 2086
2087 2087 This is only implemented for local repos and reuses potentially
2088 2088 precomputed sets in outgoing."""
2089 2089 if not outgoing.missing:
2090 2090 return None
2091 2091 return self._changegroupsubset(outgoing.common,
2092 2092 outgoing.missing,
2093 2093 outgoing.missingheads,
2094 2094 source)
2095 2095
2096 2096 def getbundle(self, source, heads=None, common=None):
2097 2097 """Like changegroupsubset, but returns the set difference between the
2098 2098 ancestors of heads and the ancestors common.
2099 2099
2100 2100 If heads is None, use the local heads. If common is None, use [nullid].
2101 2101
2102 2102 The nodes in common might not all be known locally due to the way the
2103 2103 current discovery protocol works.
2104 2104 """
2105 2105 cl = self.changelog
2106 2106 if common:
2107 2107 nm = cl.nodemap
2108 2108 common = [n for n in common if n in nm]
2109 2109 else:
2110 2110 common = [nullid]
2111 2111 if not heads:
2112 2112 heads = cl.heads()
2113 2113 return self.getlocalbundle(source,
2114 2114 discovery.outgoing(cl, common, heads))
2115 2115
2116 @unfilteredmeth
2116 @unfilteredmethod
2117 2117 def _changegroupsubset(self, commonrevs, csets, heads, source):
2118 2118
2119 2119 cl = self.changelog
2120 2120 mf = self.manifest
2121 2121 mfs = {} # needed manifests
2122 2122 fnodes = {} # needed file nodes
2123 2123 changedfiles = set()
2124 2124 fstate = ['', {}]
2125 2125 count = [0, 0]
2126 2126
2128 2128 # can we go through the fast path?
2128 2128 heads.sort()
2129 2129 if heads == sorted(self.heads()):
2130 2130 return self._changegroup(csets, source)
2131 2131
2132 2132 # slow path
2133 2133 self.hook('preoutgoing', throw=True, source=source)
2134 2134 self.changegroupinfo(csets, source)
2135 2135
2136 2136 # filter any nodes that claim to be part of the known set
2137 2137 def prune(revlog, missing):
2138 2138 rr, rl = revlog.rev, revlog.linkrev
2139 2139 return [n for n in missing
2140 2140 if rl(rr(n)) not in commonrevs]
2141 2141
2142 2142 progress = self.ui.progress
2143 2143 _bundling = _('bundling')
2144 2144 _changesets = _('changesets')
2145 2145 _manifests = _('manifests')
2146 2146 _files = _('files')
2147 2147
2148 2148 def lookup(revlog, x):
2149 2149 if revlog == cl:
2150 2150 c = cl.read(x)
2151 2151 changedfiles.update(c[3])
2152 2152 mfs.setdefault(c[0], x)
2153 2153 count[0] += 1
2154 2154 progress(_bundling, count[0],
2155 2155 unit=_changesets, total=count[1])
2156 2156 return x
2157 2157 elif revlog == mf:
2158 2158 clnode = mfs[x]
2159 2159 mdata = mf.readfast(x)
2160 2160 for f, n in mdata.iteritems():
2161 2161 if f in changedfiles:
2162 2162 fnodes[f].setdefault(n, clnode)
2163 2163 count[0] += 1
2164 2164 progress(_bundling, count[0],
2165 2165 unit=_manifests, total=count[1])
2166 2166 return clnode
2167 2167 else:
2168 2168 progress(_bundling, count[0], item=fstate[0],
2169 2169 unit=_files, total=count[1])
2170 2170 return fstate[1][x]
2171 2171
2172 2172 bundler = changegroup.bundle10(lookup)
2173 2173 reorder = self.ui.config('bundle', 'reorder', 'auto')
2174 2174 if reorder == 'auto':
2175 2175 reorder = None
2176 2176 else:
2177 2177 reorder = util.parsebool(reorder)
2178 2178
2179 2179 def gengroup():
2180 2180 # Create a changenode group generator that will call our functions
2181 2181 # back to lookup the owning changenode and collect information.
2182 2182 count[:] = [0, len(csets)]
2183 2183 for chunk in cl.group(csets, bundler, reorder=reorder):
2184 2184 yield chunk
2185 2185 progress(_bundling, None)
2186 2186
2187 2187 # Create a generator for the manifestnodes that calls our lookup
2188 2188 # and data collection functions back.
2189 2189 for f in changedfiles:
2190 2190 fnodes[f] = {}
2191 2191 count[:] = [0, len(mfs)]
2192 2192 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2193 2193 yield chunk
2194 2194 progress(_bundling, None)
2195 2195
2196 2196 mfs.clear()
2197 2197
2198 2198 # Go through all our files in order sorted by name.
2199 2199 count[:] = [0, len(changedfiles)]
2200 2200 for fname in sorted(changedfiles):
2201 2201 filerevlog = self.file(fname)
2202 2202 if not len(filerevlog):
2203 2203 raise util.Abort(_("empty or missing revlog for %s")
2204 2204 % fname)
2205 2205 fstate[0] = fname
2206 2206 fstate[1] = fnodes.pop(fname, {})
2207 2207
2208 2208 nodelist = prune(filerevlog, fstate[1])
2209 2209 if nodelist:
2210 2210 count[0] += 1
2211 2211 yield bundler.fileheader(fname)
2212 2212 for chunk in filerevlog.group(nodelist, bundler, reorder):
2213 2213 yield chunk
2214 2214
2215 2215 # Signal that no more groups are left.
2216 2216 yield bundler.close()
2217 2217 progress(_bundling, None)
2218 2218
2219 2219 if csets:
2220 2220 self.hook('outgoing', node=hex(csets[0]), source=source)
2221 2221
2222 2222 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2223 2223
2224 2224 def changegroup(self, basenodes, source):
2225 2225 # to avoid a race we use changegroupsubset() (issue1320)
2226 2226 return self.changegroupsubset(basenodes, self.heads(), source)
2227 2227
2228 @unfilteredmeth
2228 @unfilteredmethod
2229 2229 def _changegroup(self, nodes, source):
2230 2230 """Compute the changegroup of all nodes that we have that a recipient
2231 2231 doesn't. Return a chunkbuffer object whose read() method will return
2232 2232 successive changegroup chunks.
2233 2233
2234 2234 This is much easier than the previous function as we can assume that
2235 2235 the recipient has any changenode we aren't sending them.
2236 2236
2237 2237 nodes is the set of nodes to send"""
2238 2238
2239 2239 cl = self.changelog
2240 2240 mf = self.manifest
2241 2241 mfs = {}
2242 2242 changedfiles = set()
2243 2243 fstate = ['']
2244 2244 count = [0, 0]
2245 2245
2246 2246 self.hook('preoutgoing', throw=True, source=source)
2247 2247 self.changegroupinfo(nodes, source)
2248 2248
2249 2249 revset = set([cl.rev(n) for n in nodes])
2250 2250
2251 2251 def gennodelst(log):
2252 2252 ln, llr = log.node, log.linkrev
2253 2253 return [ln(r) for r in log if llr(r) in revset]
2254 2254
2255 2255 progress = self.ui.progress
2256 2256 _bundling = _('bundling')
2257 2257 _changesets = _('changesets')
2258 2258 _manifests = _('manifests')
2259 2259 _files = _('files')
2260 2260
2261 2261 def lookup(revlog, x):
2262 2262 if revlog == cl:
2263 2263 c = cl.read(x)
2264 2264 changedfiles.update(c[3])
2265 2265 mfs.setdefault(c[0], x)
2266 2266 count[0] += 1
2267 2267 progress(_bundling, count[0],
2268 2268 unit=_changesets, total=count[1])
2269 2269 return x
2270 2270 elif revlog == mf:
2271 2271 count[0] += 1
2272 2272 progress(_bundling, count[0],
2273 2273 unit=_manifests, total=count[1])
2274 2274 return cl.node(revlog.linkrev(revlog.rev(x)))
2275 2275 else:
2276 2276 progress(_bundling, count[0], item=fstate[0],
2277 2277 total=count[1], unit=_files)
2278 2278 return cl.node(revlog.linkrev(revlog.rev(x)))
2279 2279
2280 2280 bundler = changegroup.bundle10(lookup)
2281 2281 reorder = self.ui.config('bundle', 'reorder', 'auto')
2282 2282 if reorder == 'auto':
2283 2283 reorder = None
2284 2284 else:
2285 2285 reorder = util.parsebool(reorder)
2286 2286
2287 2287 def gengroup():
2288 2288 '''yield a sequence of changegroup chunks (strings)'''
2289 2289 # construct a list of all changed files
2290 2290
2291 2291 count[:] = [0, len(nodes)]
2292 2292 for chunk in cl.group(nodes, bundler, reorder=reorder):
2293 2293 yield chunk
2294 2294 progress(_bundling, None)
2295 2295
2296 2296 count[:] = [0, len(mfs)]
2297 2297 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2298 2298 yield chunk
2299 2299 progress(_bundling, None)
2300 2300
2301 2301 count[:] = [0, len(changedfiles)]
2302 2302 for fname in sorted(changedfiles):
2303 2303 filerevlog = self.file(fname)
2304 2304 if not len(filerevlog):
2305 2305 raise util.Abort(_("empty or missing revlog for %s")
2306 2306 % fname)
2307 2307 fstate[0] = fname
2308 2308 nodelist = gennodelst(filerevlog)
2309 2309 if nodelist:
2310 2310 count[0] += 1
2311 2311 yield bundler.fileheader(fname)
2312 2312 for chunk in filerevlog.group(nodelist, bundler, reorder):
2313 2313 yield chunk
2314 2314 yield bundler.close()
2315 2315 progress(_bundling, None)
2316 2316
2317 2317 if nodes:
2318 2318 self.hook('outgoing', node=hex(nodes[0]), source=source)
2319 2319
2320 2320 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2321 2321
2322 @unfilteredmeth
2322 @unfilteredmethod
2323 2323 def addchangegroup(self, source, srctype, url, emptyok=False):
2324 2324 """Add the changegroup returned by source.read() to this repo.
2325 2325 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2326 2326 the URL of the repo where this changegroup is coming from.
2327 2327
2328 2328 Return an integer summarizing the change to this repo:
2329 2329 - nothing changed or no source: 0
2330 2330 - more heads than before: 1+added heads (2..n)
2331 2331 - fewer heads than before: -1-removed heads (-2..-n)
2332 2332 - number of heads stays the same: 1
2333 2333 """
2334 2334 def csmap(x):
2335 2335 self.ui.debug("add changeset %s\n" % short(x))
2336 2336 return len(cl)
2337 2337
2338 2338 def revmap(x):
2339 2339 return cl.rev(x)
2340 2340
2341 2341 if not source:
2342 2342 return 0
2343 2343
2344 2344 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2345 2345
2346 2346 changesets = files = revisions = 0
2347 2347 efiles = set()
2348 2348
2349 2349 # write changelog data to temp files so concurrent readers will not see
2350 2350 # an inconsistent view
2351 2351 cl = self.changelog
2352 2352 cl.delayupdate()
2353 2353 oldheads = cl.heads()
2354 2354
2355 2355 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2356 2356 try:
2357 2357 trp = weakref.proxy(tr)
2358 2358 # pull off the changeset group
2359 2359 self.ui.status(_("adding changesets\n"))
2360 2360 clstart = len(cl)
2361 2361 class prog(object):
2362 2362 step = _('changesets')
2363 2363 count = 1
2364 2364 ui = self.ui
2365 2365 total = None
2366 2366 def __call__(self):
2367 2367 self.ui.progress(self.step, self.count, unit=_('chunks'),
2368 2368 total=self.total)
2369 2369 self.count += 1
2370 2370 pr = prog()
2371 2371 source.callback = pr
2372 2372
2373 2373 source.changelogheader()
2374 2374 srccontent = cl.addgroup(source, csmap, trp)
2375 2375 if not (srccontent or emptyok):
2376 2376 raise util.Abort(_("received changelog group is empty"))
2377 2377 clend = len(cl)
2378 2378 changesets = clend - clstart
2379 2379 for c in xrange(clstart, clend):
2380 2380 efiles.update(self[c].files())
2381 2381 efiles = len(efiles)
2382 2382 self.ui.progress(_('changesets'), None)
2383 2383
2384 2384 # pull off the manifest group
2385 2385 self.ui.status(_("adding manifests\n"))
2386 2386 pr.step = _('manifests')
2387 2387 pr.count = 1
2388 2388 pr.total = changesets # manifests <= changesets
2389 2389 # no need to check for empty manifest group here:
2390 2390 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2391 2391 # no new manifest will be created and the manifest group will
2392 2392 # be empty during the pull
2393 2393 source.manifestheader()
2394 2394 self.manifest.addgroup(source, revmap, trp)
2395 2395 self.ui.progress(_('manifests'), None)
2396 2396
2397 2397 needfiles = {}
2398 2398 if self.ui.configbool('server', 'validate', default=False):
2399 2399 # validate incoming csets have their manifests
2400 2400 for cset in xrange(clstart, clend):
2401 2401 mfest = self.changelog.read(self.changelog.node(cset))[0]
2402 2402 mfest = self.manifest.readdelta(mfest)
2403 2403 # store file nodes we must see
2404 2404 for f, n in mfest.iteritems():
2405 2405 needfiles.setdefault(f, set()).add(n)
2406 2406
2407 2407 # process the files
2408 2408 self.ui.status(_("adding file changes\n"))
2409 2409 pr.step = _('files')
2410 2410 pr.count = 1
2411 2411 pr.total = efiles
2412 2412 source.callback = None
2413 2413
2414 2414 while True:
2415 2415 chunkdata = source.filelogheader()
2416 2416 if not chunkdata:
2417 2417 break
2418 2418 f = chunkdata["filename"]
2419 2419 self.ui.debug("adding %s revisions\n" % f)
2420 2420 pr()
2421 2421 fl = self.file(f)
2422 2422 o = len(fl)
2423 2423 if not fl.addgroup(source, revmap, trp):
2424 2424 raise util.Abort(_("received file revlog group is empty"))
2425 2425 revisions += len(fl) - o
2426 2426 files += 1
2427 2427 if f in needfiles:
2428 2428 needs = needfiles[f]
2429 2429 for new in xrange(o, len(fl)):
2430 2430 n = fl.node(new)
2431 2431 if n in needs:
2432 2432 needs.remove(n)
2433 2433 if not needs:
2434 2434 del needfiles[f]
2435 2435 self.ui.progress(_('files'), None)
2436 2436
2437 2437 for f, needs in needfiles.iteritems():
2438 2438 fl = self.file(f)
2439 2439 for n in needs:
2440 2440 try:
2441 2441 fl.rev(n)
2442 2442 except error.LookupError:
2443 2443 raise util.Abort(
2444 2444 _('missing file data for %s:%s - run hg verify') %
2445 2445 (f, hex(n)))
2446 2446
2447 2447 dh = 0
2448 2448 if oldheads:
2449 2449 heads = cl.heads()
2450 2450 dh = len(heads) - len(oldheads)
2451 2451 for h in heads:
2452 2452 if h not in oldheads and self[h].closesbranch():
2453 2453 dh -= 1
2454 2454 htext = ""
2455 2455 if dh:
2456 2456 htext = _(" (%+d heads)") % dh
2457 2457
2458 2458 self.ui.status(_("added %d changesets"
2459 2459 " with %d changes to %d files%s\n")
2460 2460 % (changesets, revisions, files, htext))
2461 2461 obsolete.clearobscaches(self)
2462 2462
2463 2463 if changesets > 0:
2464 2464 p = lambda: cl.writepending() and self.root or ""
2465 2465 self.hook('pretxnchangegroup', throw=True,
2466 2466 node=hex(cl.node(clstart)), source=srctype,
2467 2467 url=url, pending=p)
2468 2468
2469 2469 added = [cl.node(r) for r in xrange(clstart, clend)]
2470 2470 publishing = self.ui.configbool('phases', 'publish', True)
2471 2471 if srctype == 'push':
2472 2472 # Old servers cannot push the boundary themselves.
2473 2473 # New servers won't push the boundary if the changeset already
2474 2474 # existed locally as secret
2475 2475 #
2476 2476 # We should not use added here but the list of all changes in
2477 2477 # the bundle
2478 2478 if publishing:
2479 2479 phases.advanceboundary(self, phases.public, srccontent)
2480 2480 else:
2481 2481 phases.advanceboundary(self, phases.draft, srccontent)
2482 2482 phases.retractboundary(self, phases.draft, added)
2483 2483 elif srctype != 'strip':
2484 2484 # publishing only alters behavior during push
2485 2485 #
2486 2486 # strip should not touch boundary at all
2487 2487 phases.retractboundary(self, phases.draft, added)
2488 2488
2489 2489 # make changelog see real files again
2490 2490 cl.finalize(trp)
2491 2491
2492 2492 tr.close()
2493 2493
2494 2494 if changesets > 0:
2495 2495 self.updatebranchcache()
2496 2496 def runhooks():
2497 2497 # forcefully update the on-disk branch cache
2498 2498 self.ui.debug("updating the branch cache\n")
2499 2499 self.hook("changegroup", node=hex(cl.node(clstart)),
2500 2500 source=srctype, url=url)
2501 2501
2502 2502 for n in added:
2503 2503 self.hook("incoming", node=hex(n), source=srctype,
2504 2504 url=url)
2505 2505 self._afterlock(runhooks)
2506 2506
2507 2507 finally:
2508 2508 tr.release()
2509 2509 # never return 0 here:
2510 2510 if dh < 0:
2511 2511 return dh - 1
2512 2512 else:
2513 2513 return dh + 1
2514 2514
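The `dh` bookkeeping just above implements the return convention documented at the top of `addchangegroup()`: a successful run never returns 0, and the value encodes the change in head count. A tiny illustrative decoder (the helper name is made up):

    def headdelta(ret):
        # 0: nothing changed or no source; >0: ret - 1 heads added;
        # <0: heads removed, net change of ret + 1.
        if ret == 0:
            return 0
        if ret > 0:
            return ret - 1   # e.g. 1 -> unchanged, 3 -> two heads added
        return ret + 1       # e.g. -3 -> two heads removed (delta -2)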
2515 2515 def stream_in(self, remote, requirements):
2516 2516 lock = self.lock()
2517 2517 try:
2518 2518 # Save remote branchmap. We will use it later
2519 2519 # to speed up branchcache creation
2520 2520 rbranchmap = None
2521 2521 if remote.capable("branchmap"):
2522 2522 rbranchmap = remote.branchmap()
2523 2523
2524 2524 fp = remote.stream_out()
2525 2525 l = fp.readline()
2526 2526 try:
2527 2527 resp = int(l)
2528 2528 except ValueError:
2529 2529 raise error.ResponseError(
2530 2530 _('unexpected response from remote server:'), l)
2531 2531 if resp == 1:
2532 2532 raise util.Abort(_('operation forbidden by server'))
2533 2533 elif resp == 2:
2534 2534 raise util.Abort(_('locking the remote repository failed'))
2535 2535 elif resp != 0:
2536 2536 raise util.Abort(_('the server sent an unknown error code'))
2537 2537 self.ui.status(_('streaming all changes\n'))
2538 2538 l = fp.readline()
2539 2539 try:
2540 2540 total_files, total_bytes = map(int, l.split(' ', 1))
2541 2541 except (ValueError, TypeError):
2542 2542 raise error.ResponseError(
2543 2543 _('unexpected response from remote server:'), l)
2544 2544 self.ui.status(_('%d files to transfer, %s of data\n') %
2545 2545 (total_files, util.bytecount(total_bytes)))
2546 2546 handled_bytes = 0
2547 2547 self.ui.progress(_('clone'), 0, total=total_bytes)
2548 2548 start = time.time()
2549 2549 for i in xrange(total_files):
2550 2550 # XXX doesn't support '\n' or '\r' in filenames
2551 2551 l = fp.readline()
2552 2552 try:
2553 2553 name, size = l.split('\0', 1)
2554 2554 size = int(size)
2555 2555 except (ValueError, TypeError):
2556 2556 raise error.ResponseError(
2557 2557 _('unexpected response from remote server:'), l)
2558 2558 if self.ui.debugflag:
2559 2559 self.ui.debug('adding %s (%s)\n' %
2560 2560 (name, util.bytecount(size)))
2561 2561 # for backwards compat, name was partially encoded
2562 2562 ofp = self.sopener(store.decodedir(name), 'w')
2563 2563 for chunk in util.filechunkiter(fp, limit=size):
2564 2564 handled_bytes += len(chunk)
2565 2565 self.ui.progress(_('clone'), handled_bytes,
2566 2566 total=total_bytes)
2567 2567 ofp.write(chunk)
2568 2568 ofp.close()
2569 2569 elapsed = time.time() - start
2570 2570 if elapsed <= 0:
2571 2571 elapsed = 0.001
2572 2572 self.ui.progress(_('clone'), None)
2573 2573 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2574 2574 (util.bytecount(total_bytes), elapsed,
2575 2575 util.bytecount(total_bytes / elapsed)))
2576 2576
2577 2577 # new requirements = old non-format requirements +
2578 2578 # new format-related requirements
2579 2579 # from the streamed-in repository
2580 2580 requirements.update(set(self.requirements) - self.supportedformats)
2581 2581 self._applyrequirements(requirements)
2582 2582 self._writerequirements()
2583 2583
2584 2584 if rbranchmap:
2585 2585 rbheads = []
2586 2586 for bheads in rbranchmap.itervalues():
2587 2587 rbheads.extend(bheads)
2588 2588
2589 2589 self.branchcache = rbranchmap
2590 2590 if rbheads:
2591 2591 rtiprev = max((int(self.changelog.rev(node))
2592 2592 for node in rbheads))
2593 2593 self._writebranchcache(self.branchcache,
2594 2594 self[rtiprev].node(), rtiprev)
2595 2595 self.invalidate()
2596 2596 return len(self.heads()) + 1
2597 2597 finally:
2598 2598 lock.release()
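# Wire format consumed by stream_in() above, as implied by its parsing code
# (a reference sketch only, not a normative protocol description):
#
#   <resp>\n                        status code: 0 ok, 1 forbidden, 2 lock failed
#   <total_files> <total_bytes>\n   overall counts, used for progress output
#   then, for every file:
#   <store name>\0<size>\n          header, followed by exactly <size> bytes of data
#
# so each file entry is read roughly as:
#
#     name, size = fp.readline().split('\0', 1)
#     data = fp.read(int(size))     # the real code streams this in chunks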
2599 2599
2600 2600 def clone(self, remote, heads=[], stream=False):
2601 2601 '''clone remote repository.
2602 2602
2603 2603 keyword arguments:
2604 2604 heads: list of revs to clone (forces use of pull)
2605 2605 stream: use streaming clone if possible'''
2606 2606
2607 2607 # now, all clients that can request uncompressed clones can
2608 2608 # read repo formats supported by all servers that can serve
2609 2609 # them.
2610 2610
2611 2611 # if revlog format changes, client will have to check version
2612 2612 # and format flags on "stream" capability, and use
2613 2613 # uncompressed only if compatible.
2614 2614
2615 2615 if not stream:
2616 2616 # if the server explicitly prefers to stream (for fast LANs)
2617 2617 stream = remote.capable('stream-preferred')
2618 2618
2619 2619 if stream and not heads:
2620 2620 # 'stream' means remote revlog format is revlogv1 only
2621 2621 if remote.capable('stream'):
2622 2622 return self.stream_in(remote, set(('revlogv1',)))
2623 2623 # otherwise, 'streamreqs' contains the remote revlog format
2624 2624 streamreqs = remote.capable('streamreqs')
2625 2625 if streamreqs:
2626 2626 streamreqs = set(streamreqs.split(','))
2627 2627 # if we support it, stream in and adjust our requirements
2628 2628 if not streamreqs - self.supportedformats:
2629 2629 return self.stream_in(remote, streamreqs)
2630 2630 return self.pull(remote, heads)
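# Condensed decision table for clone() above, restating its branches with the
# method's own local names:
#
#   heads requested                              -> pull(remote, heads)
#   stream not asked for, but remote advertises
#   'stream-preferred'                           -> behave as if stream=True
#   stream and remote advertises 'stream'        -> stream_in(remote, {'revlogv1'})
#   stream and 'streamreqs' is a subset of
#   self.supportedformats                        -> stream_in(remote, streamreqs)
#   anything else                                -> pull(remote, heads)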
2631 2631
2632 2632 def pushkey(self, namespace, key, old, new):
2633 2633 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2634 2634 old=old, new=new)
2635 2635 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2636 2636 ret = pushkey.push(self, namespace, key, old, new)
2637 2637 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2638 2638 ret=ret)
2639 2639 return ret
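# Usage sketch for pushkey() (the values are hypothetical; 'bookmarks' is a
# standard pushkey namespace and an empty old value conventionally means
# "create"):
#
#     ok = repo.pushkey('bookmarks', 'feature-x', '', hex(newnode))
#     # prepushkey fires (and may abort) before pushkey.push(); the pushkey
#     # hook fires afterwards with the result passed as 'ret'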
2640 2640
2641 2641 def listkeys(self, namespace):
2642 2642 self.hook('prelistkeys', throw=True, namespace=namespace)
2643 2643 self.ui.debug('listing keys for "%s"\n' % namespace)
2644 2644 values = pushkey.list(self, namespace)
2645 2645 self.hook('listkeys', namespace=namespace, values=values)
2646 2646 return values
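# Usage sketch for listkeys() (namespace names such as 'bookmarks' and
# 'phases' are the usual pushkey namespaces; the result is a plain dict):
#
#     marks = repo.listkeys('bookmarks')    # {bookmark name: hex node}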
2647 2647
2648 2648 def debugwireargs(self, one, two, three=None, four=None, five=None):
2649 2649 '''used to test argument passing over the wire'''
2650 2650 return "%s %s %s %s %s" % (one, two, three, four, five)
2651 2651
2652 2652 def savecommitmessage(self, text):
2653 2653 fp = self.opener('last-message.txt', 'wb')
2654 2654 try:
2655 2655 fp.write(text)
2656 2656 finally:
2657 2657 fp.close()
2658 2658 return self.pathto(fp.name[len(self.root)+1:])
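# Usage sketch for savecommitmessage(): the text is written through
# self.opener (conventionally .hg/last-message.txt) and a user-displayable
# path to it is returned, so callers can point at the saved message, e.g.:
#
#     msgfn = repo.savecommitmessage(text)
#     ui.write(_('note: commit message saved in %s\n') % msgfn)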
2659 2659
2660 2660 # used to avoid circular references so destructors work
2661 2661 def aftertrans(files):
2662 2662 renamefiles = [tuple(t) for t in files]
2663 2663 def a():
2664 2664 for src, dest in renamefiles:
2665 2665 try:
2666 2666 util.rename(src, dest)
2667 2667 except OSError: # journal file does not yet exist
2668 2668 pass
2669 2669 return a
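# Behaviour sketch for aftertrans(): the returned closure is meant to run
# once a transaction is finished, renaming each (src, dest) journal pair and
# silently skipping files that were never created (the pairing with the
# transaction object itself happens elsewhere and is only assumed here):
#
#     cb = aftertrans([('journal', 'undo'), ('journal.dirstate', 'undo.dirstate')])
#     cb()    # renames whichever of the journal files actually exist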
2670 2670
2671 2671 def undoname(fn):
2672 2672 base, name = os.path.split(fn)
2673 2673 assert name.startswith('journal')
2674 2674 return os.path.join(base, name.replace('journal', 'undo', 1))
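# Example: undoname() maps a journal file name to its undo counterpart by
# rewriting only the first 'journal' in the basename:
#
#     undoname('.hg/store/journal.phaseroots')  ->  '.hg/store/undo.phaseroots'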
2675 2675
2676 2676 def instance(ui, path, create):
2677 2677 return localrepository(ui, util.urllocalpath(path), create)
2678 2678
2679 2679 def islocal(path):
2680 2680 return True