##// END OF EJS Templates
localrepo: introduce shared method to check if a repository is shared...
Angel Ezquerra -
r23666:965788d9 default
parent child Browse files
Show More
@@ -1,125 +1,125 b''
1 1 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 '''share a common history between several working directories'''
7 7
8 8 from mercurial.i18n import _
9 9 from mercurial import cmdutil, hg, util, extensions, bookmarks
10 10 from mercurial.hg import repository, parseurl
11 11 import errno
12 12
13 13 cmdtable = {}
14 14 command = cmdutil.command(cmdtable)
15 15 testedwith = 'internal'
16 16
17 17 @command('share',
18 18 [('U', 'noupdate', None, _('do not create a working copy')),
19 19 ('B', 'bookmarks', None, _('also share bookmarks'))],
20 20 _('[-U] [-B] SOURCE [DEST]'),
21 21 norepo=True)
22 22 def share(ui, source, dest=None, noupdate=False, bookmarks=False):
23 23 """create a new shared repository
24 24
25 25 Initialize a new repository and working directory that shares its
26 26 history (and optionally bookmarks) with another repository.
27 27
28 28 .. note::
29 29
30 30 using rollback or extensions that destroy/modify history (mq,
31 31 rebase, etc.) can cause considerable confusion with shared
32 32 clones. In particular, if two shared clones are both updated to
33 33 the same changeset, and one of them destroys that changeset
34 34 with rollback, the other clone will suddenly stop working: all
35 35 operations will fail with "abort: working directory has unknown
36 36 parent". The only known workaround is to use debugsetparents on
37 37 the broken clone to reset it to a changeset that still exists.
38 38 """
39 39
40 40 return hg.share(ui, source, dest, not noupdate, bookmarks)
41 41
42 42 @command('unshare', [], '')
43 43 def unshare(ui, repo):
44 44 """convert a shared repository to a normal one
45 45
46 46 Copy the store data to the repo and remove the sharedpath data.
47 47 """
48 48
49 if repo.sharedpath == repo.path:
49 if not repo.shared():
50 50 raise util.Abort(_("this is not a shared repo"))
51 51
52 52 destlock = lock = None
53 53 lock = repo.lock()
54 54 try:
55 55 # we use locks here because if we race with commit, we
56 56 # can end up with extra data in the cloned revlogs that's
57 57 # not pointed to by changesets, thus causing verify to
58 58 # fail
59 59
60 60 destlock = hg.copystore(ui, repo, repo.path)
61 61
62 62 sharefile = repo.join('sharedpath')
63 63 util.rename(sharefile, sharefile + '.old')
64 64
65 65 repo.requirements.discard('sharedpath')
66 66 repo._writerequirements()
67 67 finally:
68 68 destlock and destlock.release()
69 69 lock and lock.release()
70 70
71 71 # update store, spath, sopener and sjoin of repo
72 72 repo.unfiltered().__init__(repo.baseui, repo.root)
73 73
74 74 def extsetup(ui):
75 75 extensions.wrapfunction(bookmarks.bmstore, 'getbkfile', getbkfile)
76 76 extensions.wrapfunction(bookmarks.bmstore, 'recordchange', recordchange)
77 77 extensions.wrapfunction(bookmarks.bmstore, 'write', write)
78 78
79 79 def _hassharedbookmarks(repo):
80 80 """Returns whether this repo has shared bookmarks"""
81 81 try:
82 82 repo.vfs.read('bookmarks.shared')
83 83 return True
84 84 except IOError, inst:
85 85 if inst.errno != errno.ENOENT:
86 86 raise
87 87 return False
88 88
89 89 def _getsrcrepo(repo):
90 90 """
91 91 Returns the source repository object for a given shared repository.
92 92 If repo is not a shared repository, return None.
93 93 """
94 94 if repo.sharedpath == repo.path:
95 95 return None
96 96
97 97 # the sharedpath always ends in the .hg; we want the path to the repo
98 98 source = repo.vfs.split(repo.sharedpath)[0]
99 99 srcurl, branches = parseurl(source)
100 100 return repository(repo.ui, srcurl)
101 101
102 102 def getbkfile(orig, self, repo):
103 103 if _hassharedbookmarks(repo):
104 104 srcrepo = _getsrcrepo(repo)
105 105 if srcrepo is not None:
106 106 repo = srcrepo
107 107 return orig(self, repo)
108 108
109 109 def recordchange(orig, self, tr):
110 110 # Continue with write to local bookmarks file as usual
111 111 orig(self, tr)
112 112
113 113 if _hassharedbookmarks(self._repo):
114 114 srcrepo = _getsrcrepo(self._repo)
115 115 if srcrepo is not None:
116 116 category = 'share-bookmarks'
117 117 tr.addpostclose(category, lambda tr: self._writerepo(srcrepo))
118 118
119 119 def write(orig, self):
120 120 # First write local bookmarks file in case we ever unshare
121 121 orig(self)
122 122 if _hassharedbookmarks(self._repo):
123 123 srcrepo = _getsrcrepo(self._repo)
124 124 if srcrepo is not None:
125 125 self._writerepo(srcrepo)
@@ -1,1833 +1,1839 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
25 25 class repofilecache(filecache):
26 26 """All filecache usage on repo are done for logic that should be unfiltered
27 27 """
28 28
29 29 def __get__(self, repo, type=None):
30 30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 31 def __set__(self, repo, value):
32 32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 33 def __delete__(self, repo):
34 34 return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
36 36 class storecache(repofilecache):
37 37 """filecache for files in the store"""
38 38 def join(self, obj, fname):
39 39 return obj.sjoin(fname)
40 40
41 41 class unfilteredpropertycache(propertycache):
42 42 """propertycache that apply to unfiltered repo only"""
43 43
44 44 def __get__(self, repo, type=None):
45 45 unfi = repo.unfiltered()
46 46 if unfi is repo:
47 47 return super(unfilteredpropertycache, self).__get__(unfi)
48 48 return getattr(unfi, self.name)
49 49
50 50 class filteredpropertycache(propertycache):
51 51 """propertycache that must take filtering in account"""
52 52
53 53 def cachevalue(self, obj, value):
54 54 object.__setattr__(obj, self.name, value)
55 55
56 56
57 57 def hasunfilteredcache(repo, name):
58 58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 59 return name in vars(repo.unfiltered())
60 60
61 61 def unfilteredmethod(orig):
62 62 """decorate method that always need to be run on unfiltered version"""
63 63 def wrapper(repo, *args, **kwargs):
64 64 return orig(repo.unfiltered(), *args, **kwargs)
65 65 return wrapper
66 66
67 67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 68 'unbundle'))
69 69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
71 71 class localpeer(peer.peerrepository):
72 72 '''peer for a local repo; reflects only the most recent API'''
73 73
74 74 def __init__(self, repo, caps=moderncaps):
75 75 peer.peerrepository.__init__(self)
76 76 self._repo = repo.filtered('served')
77 77 self.ui = repo.ui
78 78 self._caps = repo._restrictcapabilities(caps)
79 79 self.requirements = repo.requirements
80 80 self.supportedformats = repo.supportedformats
81 81
82 82 def close(self):
83 83 self._repo.close()
84 84
85 85 def _capabilities(self):
86 86 return self._caps
87 87
88 88 def local(self):
89 89 return self._repo
90 90
91 91 def canpush(self):
92 92 return True
93 93
94 94 def url(self):
95 95 return self._repo.url()
96 96
97 97 def lookup(self, key):
98 98 return self._repo.lookup(key)
99 99
100 100 def branchmap(self):
101 101 return self._repo.branchmap()
102 102
103 103 def heads(self):
104 104 return self._repo.heads()
105 105
106 106 def known(self, nodes):
107 107 return self._repo.known(nodes)
108 108
109 109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 110 format='HG10', **kwargs):
111 111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 112 common=common, bundlecaps=bundlecaps, **kwargs)
113 113 if bundlecaps is not None and 'HG2Y' in bundlecaps:
114 114 # When requesting a bundle2, getbundle returns a stream to make the
115 115 # wire level function happier. We need to build a proper object
116 116 # from it in local peer.
117 117 cg = bundle2.unbundle20(self.ui, cg)
118 118 return cg
119 119
120 120 # TODO We might want to move the next two calls into legacypeer and add
121 121 # unbundle instead.
122 122
123 123 def unbundle(self, cg, heads, url):
124 124 """apply a bundle on a repo
125 125
126 126 This function handles the repo locking itself."""
127 127 try:
128 128 cg = exchange.readbundle(self.ui, cg, None)
129 129 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
130 130 if util.safehasattr(ret, 'getchunks'):
131 131 # This is a bundle20 object, turn it into an unbundler.
132 132 # This little dance should be dropped eventually when the API
133 133 # is finally improved.
134 134 stream = util.chunkbuffer(ret.getchunks())
135 135 ret = bundle2.unbundle20(self.ui, stream)
136 136 return ret
137 137 except error.PushRaced, exc:
138 138 raise error.ResponseError(_('push failed:'), str(exc))
139 139
140 140 def lock(self):
141 141 return self._repo.lock()
142 142
143 143 def addchangegroup(self, cg, source, url):
144 144 return changegroup.addchangegroup(self._repo, cg, source, url)
145 145
146 146 def pushkey(self, namespace, key, old, new):
147 147 return self._repo.pushkey(namespace, key, old, new)
148 148
149 149 def listkeys(self, namespace):
150 150 return self._repo.listkeys(namespace)
151 151
152 152 def debugwireargs(self, one, two, three=None, four=None, five=None):
153 153 '''used to test argument passing over the wire'''
154 154 return "%s %s %s %s %s" % (one, two, three, four, five)
155 155
156 156 class locallegacypeer(localpeer):
157 157 '''peer extension which implements legacy methods too; used for tests with
158 158 restricted capabilities'''
159 159
160 160 def __init__(self, repo):
161 161 localpeer.__init__(self, repo, caps=legacycaps)
162 162
163 163 def branches(self, nodes):
164 164 return self._repo.branches(nodes)
165 165
166 166 def between(self, pairs):
167 167 return self._repo.between(pairs)
168 168
169 169 def changegroup(self, basenodes, source):
170 170 return changegroup.changegroup(self._repo, basenodes, source)
171 171
172 172 def changegroupsubset(self, bases, heads, source):
173 173 return changegroup.changegroupsubset(self._repo, bases, heads, source)
174 174
175 175 class localrepository(object):
176 176
177 177 supportedformats = set(('revlogv1', 'generaldelta'))
178 178 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
179 179 'dotencode'))
180 180 openerreqs = set(('revlogv1', 'generaldelta'))
181 181 requirements = ['revlogv1']
182 182 filtername = None
183 183
184 184 # a list of (ui, featureset) functions.
185 185 # only functions defined in module of enabled extensions are invoked
186 186 featuresetupfuncs = set()
187 187
188 188 def _baserequirements(self, create):
189 189 return self.requirements[:]
190 190
191 191 def __init__(self, baseui, path=None, create=False):
192 192 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
193 193 self.wopener = self.wvfs
194 194 self.root = self.wvfs.base
195 195 self.path = self.wvfs.join(".hg")
196 196 self.origroot = path
197 197 self.auditor = pathutil.pathauditor(self.root, self._checknested)
198 198 self.vfs = scmutil.vfs(self.path)
199 199 self.opener = self.vfs
200 200 self.baseui = baseui
201 201 self.ui = baseui.copy()
202 202 self.ui.copy = baseui.copy # prevent copying repo configuration
203 203 # A list of callback to shape the phase if no data were found.
204 204 # Callback are in the form: func(repo, roots) --> processed root.
205 205 # This list it to be filled by extension during repo setup
206 206 self._phasedefaults = []
207 207 try:
208 208 self.ui.readconfig(self.join("hgrc"), self.root)
209 209 extensions.loadall(self.ui)
210 210 except IOError:
211 211 pass
212 212
213 213 if self.featuresetupfuncs:
214 214 self.supported = set(self._basesupported) # use private copy
215 215 extmods = set(m.__name__ for n, m
216 216 in extensions.extensions(self.ui))
217 217 for setupfunc in self.featuresetupfuncs:
218 218 if setupfunc.__module__ in extmods:
219 219 setupfunc(self.ui, self.supported)
220 220 else:
221 221 self.supported = self._basesupported
222 222
223 223 if not self.vfs.isdir():
224 224 if create:
225 225 if not self.wvfs.exists():
226 226 self.wvfs.makedirs()
227 227 self.vfs.makedir(notindexed=True)
228 228 requirements = self._baserequirements(create)
229 229 if self.ui.configbool('format', 'usestore', True):
230 230 self.vfs.mkdir("store")
231 231 requirements.append("store")
232 232 if self.ui.configbool('format', 'usefncache', True):
233 233 requirements.append("fncache")
234 234 if self.ui.configbool('format', 'dotencode', True):
235 235 requirements.append('dotencode')
236 236 # create an invalid changelog
237 237 self.vfs.append(
238 238 "00changelog.i",
239 239 '\0\0\0\2' # represents revlogv2
240 240 ' dummy changelog to prevent using the old repo layout'
241 241 )
242 242 if self.ui.configbool('format', 'generaldelta', False):
243 243 requirements.append("generaldelta")
244 244 requirements = set(requirements)
245 245 else:
246 246 raise error.RepoError(_("repository %s not found") % path)
247 247 elif create:
248 248 raise error.RepoError(_("repository %s already exists") % path)
249 249 else:
250 250 try:
251 251 requirements = scmutil.readrequires(self.vfs, self.supported)
252 252 except IOError, inst:
253 253 if inst.errno != errno.ENOENT:
254 254 raise
255 255 requirements = set()
256 256
257 257 self.sharedpath = self.path
258 258 try:
259 259 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
260 260 realpath=True)
261 261 s = vfs.base
262 262 if not vfs.exists():
263 263 raise error.RepoError(
264 264 _('.hg/sharedpath points to nonexistent directory %s') % s)
265 265 self.sharedpath = s
266 266 except IOError, inst:
267 267 if inst.errno != errno.ENOENT:
268 268 raise
269 269
270 270 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
271 271 self.spath = self.store.path
272 272 self.svfs = self.store.vfs
273 273 self.sopener = self.svfs
274 274 self.sjoin = self.store.join
275 275 self.vfs.createmode = self.store.createmode
276 276 self._applyrequirements(requirements)
277 277 if create:
278 278 self._writerequirements()
279 279
280 280
281 281 self._branchcaches = {}
282 282 self.filterpats = {}
283 283 self._datafilters = {}
284 284 self._transref = self._lockref = self._wlockref = None
285 285
286 286 # A cache for various files under .hg/ that tracks file changes,
287 287 # (used by the filecache decorator)
288 288 #
289 289 # Maps a property name to its util.filecacheentry
290 290 self._filecache = {}
291 291
292 292 # hold sets of revision to be filtered
293 293 # should be cleared when something might have changed the filter value:
294 294 # - new changesets,
295 295 # - phase change,
296 296 # - new obsolescence marker,
297 297 # - working directory parent change,
298 298 # - bookmark changes
299 299 self.filteredrevcache = {}
300 300
301 301 # generic mapping between names and nodes
302 302 self.names = namespaces.namespaces()
303 303
304 304 def close(self):
305 305 pass
306 306
307 307 def _restrictcapabilities(self, caps):
308 308 # bundle2 is not ready for prime time, drop it unless explicitly
309 309 # required by the tests (or some brave tester)
310 310 if self.ui.configbool('experimental', 'bundle2-exp', False):
311 311 caps = set(caps)
312 312 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
313 313 caps.add('bundle2-exp=' + urllib.quote(capsblob))
314 314 return caps
315 315
316 316 def _applyrequirements(self, requirements):
317 317 self.requirements = requirements
318 318 self.sopener.options = dict((r, 1) for r in requirements
319 319 if r in self.openerreqs)
320 320 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
321 321 if chunkcachesize is not None:
322 322 self.sopener.options['chunkcachesize'] = chunkcachesize
323 323 maxchainlen = self.ui.configint('format', 'maxchainlen')
324 324 if maxchainlen is not None:
325 325 self.sopener.options['maxchainlen'] = maxchainlen
326 326
327 327 def _writerequirements(self):
328 328 reqfile = self.opener("requires", "w")
329 329 for r in sorted(self.requirements):
330 330 reqfile.write("%s\n" % r)
331 331 reqfile.close()
332 332
333 333 def _checknested(self, path):
334 334 """Determine if path is a legal nested repository."""
335 335 if not path.startswith(self.root):
336 336 return False
337 337 subpath = path[len(self.root) + 1:]
338 338 normsubpath = util.pconvert(subpath)
339 339
340 340 # XXX: Checking against the current working copy is wrong in
341 341 # the sense that it can reject things like
342 342 #
343 343 # $ hg cat -r 10 sub/x.txt
344 344 #
345 345 # if sub/ is no longer a subrepository in the working copy
346 346 # parent revision.
347 347 #
348 348 # However, it can of course also allow things that would have
349 349 # been rejected before, such as the above cat command if sub/
350 350 # is a subrepository now, but was a normal directory before.
351 351 # The old path auditor would have rejected by mistake since it
352 352 # panics when it sees sub/.hg/.
353 353 #
354 354 # All in all, checking against the working copy seems sensible
355 355 # since we want to prevent access to nested repositories on
356 356 # the filesystem *now*.
357 357 ctx = self[None]
358 358 parts = util.splitpath(subpath)
359 359 while parts:
360 360 prefix = '/'.join(parts)
361 361 if prefix in ctx.substate:
362 362 if prefix == normsubpath:
363 363 return True
364 364 else:
365 365 sub = ctx.sub(prefix)
366 366 return sub.checknested(subpath[len(prefix) + 1:])
367 367 else:
368 368 parts.pop()
369 369 return False
370 370
371 371 def peer(self):
372 372 return localpeer(self) # not cached to avoid reference cycle
373 373
374 374 def unfiltered(self):
375 375 """Return unfiltered version of the repository
376 376
377 377 Intended to be overwritten by filtered repo."""
378 378 return self
379 379
380 380 def filtered(self, name):
381 381 """Return a filtered version of a repository"""
382 382 # build a new class with the mixin and the current class
383 383 # (possibly subclass of the repo)
384 384 class proxycls(repoview.repoview, self.unfiltered().__class__):
385 385 pass
386 386 return proxycls(self, name)
387 387
388 388 @repofilecache('bookmarks')
389 389 def _bookmarks(self):
390 390 return bookmarks.bmstore(self)
391 391
392 392 @repofilecache('bookmarks.current')
393 393 def _bookmarkcurrent(self):
394 394 return bookmarks.readcurrent(self)
395 395
396 396 def bookmarkheads(self, bookmark):
397 397 name = bookmark.split('@', 1)[0]
398 398 heads = []
399 399 for mark, n in self._bookmarks.iteritems():
400 400 if mark.split('@', 1)[0] == name:
401 401 heads.append(n)
402 402 return heads
403 403
404 404 @storecache('phaseroots')
405 405 def _phasecache(self):
406 406 return phases.phasecache(self, self._phasedefaults)
407 407
408 408 @storecache('obsstore')
409 409 def obsstore(self):
410 410 # read default format for new obsstore.
411 411 defaultformat = self.ui.configint('format', 'obsstore-version', None)
412 412 # rely on obsstore class default when possible.
413 413 kwargs = {}
414 414 if defaultformat is not None:
415 415 kwargs['defaultformat'] = defaultformat
416 416 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
417 417 store = obsolete.obsstore(self.sopener, readonly=readonly,
418 418 **kwargs)
419 419 if store and readonly:
420 420 # message is rare enough to not be translated
421 421 msg = 'obsolete feature not enabled but %i markers found!\n'
422 422 self.ui.warn(msg % len(list(store)))
423 423 return store
424 424
425 425 @storecache('00changelog.i')
426 426 def changelog(self):
427 427 c = changelog.changelog(self.sopener)
428 428 if 'HG_PENDING' in os.environ:
429 429 p = os.environ['HG_PENDING']
430 430 if p.startswith(self.root):
431 431 c.readpending('00changelog.i.a')
432 432 return c
433 433
434 434 @storecache('00manifest.i')
435 435 def manifest(self):
436 436 return manifest.manifest(self.sopener)
437 437
438 438 @repofilecache('dirstate')
439 439 def dirstate(self):
440 440 warned = [0]
441 441 def validate(node):
442 442 try:
443 443 self.changelog.rev(node)
444 444 return node
445 445 except error.LookupError:
446 446 if not warned[0]:
447 447 warned[0] = True
448 448 self.ui.warn(_("warning: ignoring unknown"
449 449 " working parent %s!\n") % short(node))
450 450 return nullid
451 451
452 452 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
453 453
454 454 def __getitem__(self, changeid):
455 455 if changeid is None:
456 456 return context.workingctx(self)
457 457 if isinstance(changeid, slice):
458 458 return [context.changectx(self, i)
459 459 for i in xrange(*changeid.indices(len(self)))
460 460 if i not in self.changelog.filteredrevs]
461 461 return context.changectx(self, changeid)
462 462
463 463 def __contains__(self, changeid):
464 464 try:
465 465 return bool(self.lookup(changeid))
466 466 except error.RepoLookupError:
467 467 return False
468 468
469 469 def __nonzero__(self):
470 470 return True
471 471
472 472 def __len__(self):
473 473 return len(self.changelog)
474 474
475 475 def __iter__(self):
476 476 return iter(self.changelog)
477 477
478 478 def revs(self, expr, *args):
479 479 '''Return a list of revisions matching the given revset'''
480 480 expr = revset.formatspec(expr, *args)
481 481 m = revset.match(None, expr)
482 482 return m(self, revset.spanset(self))
483 483
484 484 def set(self, expr, *args):
485 485 '''
486 486 Yield a context for each matching revision, after doing arg
487 487 replacement via revset.formatspec
488 488 '''
489 489 for r in self.revs(expr, *args):
490 490 yield self[r]
491 491
492 492 def url(self):
493 493 return 'file:' + self.root
494 494
495 495 def hook(self, name, throw=False, **args):
496 496 """Call a hook, passing this repo instance.
497 497
498 498 This a convenience method to aid invoking hooks. Extensions likely
499 499 won't call this unless they have registered a custom hook or are
500 500 replacing code that is expected to call a hook.
501 501 """
502 502 return hook.hook(self.ui, self, name, throw, **args)
503 503
504 504 @unfilteredmethod
505 505 def _tag(self, names, node, message, local, user, date, extra={},
506 506 editor=False):
507 507 if isinstance(names, str):
508 508 names = (names,)
509 509
510 510 branches = self.branchmap()
511 511 for name in names:
512 512 self.hook('pretag', throw=True, node=hex(node), tag=name,
513 513 local=local)
514 514 if name in branches:
515 515 self.ui.warn(_("warning: tag %s conflicts with existing"
516 516 " branch name\n") % name)
517 517
518 518 def writetags(fp, names, munge, prevtags):
519 519 fp.seek(0, 2)
520 520 if prevtags and prevtags[-1] != '\n':
521 521 fp.write('\n')
522 522 for name in names:
523 523 m = munge and munge(name) or name
524 524 if (self._tagscache.tagtypes and
525 525 name in self._tagscache.tagtypes):
526 526 old = self.tags().get(name, nullid)
527 527 fp.write('%s %s\n' % (hex(old), m))
528 528 fp.write('%s %s\n' % (hex(node), m))
529 529 fp.close()
530 530
531 531 prevtags = ''
532 532 if local:
533 533 try:
534 534 fp = self.opener('localtags', 'r+')
535 535 except IOError:
536 536 fp = self.opener('localtags', 'a')
537 537 else:
538 538 prevtags = fp.read()
539 539
540 540 # local tags are stored in the current charset
541 541 writetags(fp, names, None, prevtags)
542 542 for name in names:
543 543 self.hook('tag', node=hex(node), tag=name, local=local)
544 544 return
545 545
546 546 try:
547 547 fp = self.wfile('.hgtags', 'rb+')
548 548 except IOError, e:
549 549 if e.errno != errno.ENOENT:
550 550 raise
551 551 fp = self.wfile('.hgtags', 'ab')
552 552 else:
553 553 prevtags = fp.read()
554 554
555 555 # committed tags are stored in UTF-8
556 556 writetags(fp, names, encoding.fromlocal, prevtags)
557 557
558 558 fp.close()
559 559
560 560 self.invalidatecaches()
561 561
562 562 if '.hgtags' not in self.dirstate:
563 563 self[None].add(['.hgtags'])
564 564
565 565 m = matchmod.exact(self.root, '', ['.hgtags'])
566 566 tagnode = self.commit(message, user, date, extra=extra, match=m,
567 567 editor=editor)
568 568
569 569 for name in names:
570 570 self.hook('tag', node=hex(node), tag=name, local=local)
571 571
572 572 return tagnode
573 573
574 574 def tag(self, names, node, message, local, user, date, editor=False):
575 575 '''tag a revision with one or more symbolic names.
576 576
577 577 names is a list of strings or, when adding a single tag, names may be a
578 578 string.
579 579
580 580 if local is True, the tags are stored in a per-repository file.
581 581 otherwise, they are stored in the .hgtags file, and a new
582 582 changeset is committed with the change.
583 583
584 584 keyword arguments:
585 585
586 586 local: whether to store tags in non-version-controlled file
587 587 (default False)
588 588
589 589 message: commit message to use if committing
590 590
591 591 user: name of user to use if committing
592 592
593 593 date: date tuple to use if committing'''
594 594
595 595 if not local:
596 596 m = matchmod.exact(self.root, '', ['.hgtags'])
597 597 if util.any(self.status(match=m, unknown=True, ignored=True)):
598 598 raise util.Abort(_('working copy of .hgtags is changed'),
599 599 hint=_('please commit .hgtags manually'))
600 600
601 601 self.tags() # instantiate the cache
602 602 self._tag(names, node, message, local, user, date, editor=editor)
603 603
604 604 @filteredpropertycache
605 605 def _tagscache(self):
606 606 '''Returns a tagscache object that contains various tags related
607 607 caches.'''
608 608
609 609 # This simplifies its cache management by having one decorated
610 610 # function (this one) and the rest simply fetch things from it.
611 611 class tagscache(object):
612 612 def __init__(self):
613 613 # These two define the set of tags for this repository. tags
614 614 # maps tag name to node; tagtypes maps tag name to 'global' or
615 615 # 'local'. (Global tags are defined by .hgtags across all
616 616 # heads, and local tags are defined in .hg/localtags.)
617 617 # They constitute the in-memory cache of tags.
618 618 self.tags = self.tagtypes = None
619 619
620 620 self.nodetagscache = self.tagslist = None
621 621
622 622 cache = tagscache()
623 623 cache.tags, cache.tagtypes = self._findtags()
624 624
625 625 return cache
626 626
627 627 def tags(self):
628 628 '''return a mapping of tag to node'''
629 629 t = {}
630 630 if self.changelog.filteredrevs:
631 631 tags, tt = self._findtags()
632 632 else:
633 633 tags = self._tagscache.tags
634 634 for k, v in tags.iteritems():
635 635 try:
636 636 # ignore tags to unknown nodes
637 637 self.changelog.rev(v)
638 638 t[k] = v
639 639 except (error.LookupError, ValueError):
640 640 pass
641 641 return t
642 642
643 643 def _findtags(self):
644 644 '''Do the hard work of finding tags. Return a pair of dicts
645 645 (tags, tagtypes) where tags maps tag name to node, and tagtypes
646 646 maps tag name to a string like \'global\' or \'local\'.
647 647 Subclasses or extensions are free to add their own tags, but
648 648 should be aware that the returned dicts will be retained for the
649 649 duration of the localrepo object.'''
650 650
651 651 # XXX what tagtype should subclasses/extensions use? Currently
652 652 # mq and bookmarks add tags, but do not set the tagtype at all.
653 653 # Should each extension invent its own tag type? Should there
654 654 # be one tagtype for all such "virtual" tags? Or is the status
655 655 # quo fine?
656 656
657 657 alltags = {} # map tag name to (node, hist)
658 658 tagtypes = {}
659 659
660 660 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
661 661 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
662 662
663 663 # Build the return dicts. Have to re-encode tag names because
664 664 # the tags module always uses UTF-8 (in order not to lose info
665 665 # writing to the cache), but the rest of Mercurial wants them in
666 666 # local encoding.
667 667 tags = {}
668 668 for (name, (node, hist)) in alltags.iteritems():
669 669 if node != nullid:
670 670 tags[encoding.tolocal(name)] = node
671 671 tags['tip'] = self.changelog.tip()
672 672 tagtypes = dict([(encoding.tolocal(name), value)
673 673 for (name, value) in tagtypes.iteritems()])
674 674 return (tags, tagtypes)
675 675
676 676 def tagtype(self, tagname):
677 677 '''
678 678 return the type of the given tag. result can be:
679 679
680 680 'local' : a local tag
681 681 'global' : a global tag
682 682 None : tag does not exist
683 683 '''
684 684
685 685 return self._tagscache.tagtypes.get(tagname)
686 686
687 687 def tagslist(self):
688 688 '''return a list of tags ordered by revision'''
689 689 if not self._tagscache.tagslist:
690 690 l = []
691 691 for t, n in self.tags().iteritems():
692 692 l.append((self.changelog.rev(n), t, n))
693 693 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
694 694
695 695 return self._tagscache.tagslist
696 696
697 697 def nodetags(self, node):
698 698 '''return the tags associated with a node'''
699 699 if not self._tagscache.nodetagscache:
700 700 nodetagscache = {}
701 701 for t, n in self._tagscache.tags.iteritems():
702 702 nodetagscache.setdefault(n, []).append(t)
703 703 for tags in nodetagscache.itervalues():
704 704 tags.sort()
705 705 self._tagscache.nodetagscache = nodetagscache
706 706 return self._tagscache.nodetagscache.get(node, [])
707 707
708 708 def nodebookmarks(self, node):
709 709 marks = []
710 710 for bookmark, n in self._bookmarks.iteritems():
711 711 if n == node:
712 712 marks.append(bookmark)
713 713 return sorted(marks)
714 714
715 715 def branchmap(self):
716 716 '''returns a dictionary {branch: [branchheads]} with branchheads
717 717 ordered by increasing revision number'''
718 718 branchmap.updatecache(self)
719 719 return self._branchcaches[self.filtername]
720 720
721 721 def branchtip(self, branch):
722 722 '''return the tip node for a given branch'''
723 723 try:
724 724 return self.branchmap().branchtip(branch)
725 725 except KeyError:
726 726 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
727 727
728 728 def lookup(self, key):
729 729 return self[key].node()
730 730
731 731 def lookupbranch(self, key, remote=None):
732 732 repo = remote or self
733 733 if key in repo.branchmap():
734 734 return key
735 735
736 736 repo = (remote and remote.local()) and remote or self
737 737 return repo[key].branch()
738 738
739 739 def known(self, nodes):
740 740 nm = self.changelog.nodemap
741 741 pc = self._phasecache
742 742 result = []
743 743 for n in nodes:
744 744 r = nm.get(n)
745 745 resp = not (r is None or pc.phase(self, r) >= phases.secret)
746 746 result.append(resp)
747 747 return result
748 748
749 749 def local(self):
750 750 return self
751 751
752 752 def cancopy(self):
753 753 # so statichttprepo's override of local() works
754 754 if not self.local():
755 755 return False
756 756 if not self.ui.configbool('phases', 'publish', True):
757 757 return True
758 758 # if publishing we can't copy if there is filtered content
759 759 return not self.filtered('visible').changelog.filteredrevs
760 760
761 def shared(self):
762 '''the type of shared repository (None if not shared)'''
763 if self.sharedpath != self.path:
764 return 'store'
765 return None
766
761 767 def join(self, f, *insidef):
762 768 return os.path.join(self.path, f, *insidef)
763 769
764 770 def wjoin(self, f, *insidef):
765 771 return os.path.join(self.root, f, *insidef)
766 772
767 773 def file(self, f):
768 774 if f[0] == '/':
769 775 f = f[1:]
770 776 return filelog.filelog(self.sopener, f)
771 777
772 778 def changectx(self, changeid):
773 779 return self[changeid]
774 780
775 781 def parents(self, changeid=None):
776 782 '''get list of changectxs for parents of changeid'''
777 783 return self[changeid].parents()
778 784
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents to ``p1``/``p2``, adjusting
        copy records so they stay valid relative to the new first parent."""
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # when collapsing to a single parent, drop copy records whose
            # endpoints are both absent from that parent
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
795 801
    def filectx(self, path, changeid=None, fileid=None):
        """Return a file context for ``path``.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
800 806
801 807 def getcwd(self):
802 808 return self.dirstate.getcwd()
803 809
804 810 def pathto(self, f, cwd=None):
805 811 return self.dirstate.pathto(f, cwd)
806 812
807 813 def wfile(self, f, mode='r'):
808 814 return self.wopener(f, mode)
809 815
810 816 def _link(self, f):
811 817 return self.wvfs.islink(f)
812 818
    def _loadfilter(self, filter):
        """Build (and cache) the (matcher, fn, params) triples configured in
        the given config section — 'encode' or 'decode' as used by
        _encodefilterpats/_decodefilterpats."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables the pattern entirely
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # prefer a registered in-process data filter over shelling out
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to running the command as an external filter
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
836 842
837 843 def _filter(self, filterpats, filename, data):
838 844 for mf, fn, cmd in filterpats:
839 845 if mf(filename):
840 846 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
841 847 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
842 848 break
843 849
844 850 return data
845 851
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # cached on the unfiltered repo: filters applied when reading from
        # the working directory (see wread)
        return self._loadfilter('encode')
849 855
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # cached on the unfiltered repo: filters applied when writing to
        # the working directory (see wwrite/wwritedata)
        return self._loadfilter('decode')
853 859
854 860 def adddatafilter(self, name, filter):
855 861 self._datafilters[name] = filter
856 862
857 863 def wread(self, filename):
858 864 if self._link(filename):
859 865 data = self.wvfs.readlink(filename)
860 866 else:
861 867 data = self.wopener.read(filename)
862 868 return self._filter(self._encodefilterpats, filename, data)
863 869
864 870 def wwrite(self, filename, data, flags):
865 871 data = self._filter(self._decodefilterpats, filename, data)
866 872 if 'l' in flags:
867 873 self.wopener.symlink(data, filename)
868 874 else:
869 875 self.wopener.write(filename, data)
870 876 if 'x' in flags:
871 877 self.wvfs.setflags(filename, False, True)
872 878
873 879 def wwritedata(self, filename, data):
874 880 return self._filter(self._decodefilterpats, filename, data)
875 881
876 882 def currenttransaction(self):
877 883 """return the current transaction or None if non exists"""
878 884 tr = self._transref and self._transref() or None
879 885 if tr and tr.running():
880 886 return tr
881 887 return None
882 888
    def transaction(self, desc, report=None):
        """Open a new store transaction (or nest into the running one).

        ``report`` overrides the default warning channel used to surface
        transaction messages; journal snapshots are written first so the
        transaction can be rolled back.
        """
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        self._writejournal(desc)
        # on success the journal files are renamed to their undo names
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        vfsmap = {'plain': self.opener} # root of .hg/
        tr = transaction.transaction(rp, self.sopener, vfsmap,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode)
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        self._transref = weakref.ref(tr)
        return tr
908 914
909 915 def _journalfiles(self):
910 916 return ((self.svfs, 'journal'),
911 917 (self.vfs, 'journal.dirstate'),
912 918 (self.vfs, 'journal.branch'),
913 919 (self.vfs, 'journal.desc'),
914 920 (self.vfs, 'journal.bookmarks'),
915 921 (self.svfs, 'journal.phaseroots'))
916 922
917 923 def undofiles(self):
918 924 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
919 925
    def _writejournal(self, desc):
        """Snapshot the mutable non-store state (dirstate, branch, bookmarks,
        phaseroots) plus a description line so a transaction can be undone."""
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        # first line: repo length before the transaction; second: description
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))
931 937
    def recover(self):
        """Roll back an interrupted transaction if one is found.

        Returns True when a journal existed and was rolled back, False
        otherwise.
        """
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.sopener,
                          'plain': self.opener,}
                transaction.rollback(self.sopener, vfsmap, "journal",
                                     self.ui.warn)
                # caches may reference now-removed data
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
948 954
    def rollback(self, dryrun=False, force=False):
        """Undo the last completed transaction, if undo information exists.

        Returns the _rollback result (0 on success) or 1 when there is
        nothing to roll back.
        """
        wlock = lock = None
        try:
            # wlock before lock: standard acquisition order
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
961 967
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        """Perform the actual transaction rollback (see rollback()).

        Always returns 0; raises util.Abort when rolling back a commit
        while not checked out on tip, unless ``force`` is set.
        """
        ui = self.ui
        try:
            # undo.desc: "<oldlen>\n<desc>[\n<detail>]" as written by
            # _writejournal (and renamed on transaction close)
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.opener}
        transaction.rollback(self.sopener, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # only restore dirstate/branch if a working-directory parent was
        # stripped by the rollback
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

        self.dirstate.invalidate()
        parents = tuple([p.rev() for p in self.parents()])
        if len(parents) > 1:
            ui.status(_('working directory now based on '
                        'revisions %d and %d\n') % parents)
        else:
            ui.status(_('working directory now based on '
                        'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1028 1034
1029 1035 def invalidatecaches(self):
1030 1036
1031 1037 if '_tagscache' in vars(self):
1032 1038 # can't use delattr on proxy
1033 1039 del self.__dict__['_tagscache']
1034 1040
1035 1041 self.unfiltered()._branchcaches.clear()
1036 1042 self.invalidatevolatilesets()
1037 1043
    def invalidatevolatilesets(self):
        """Drop caches derived from volatile state: filtered-revision sets
        and obsolescence caches."""
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1041 1047
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # clear the per-attribute file caches before dropping the
            # dirstate itself, so it is fully rebuilt on next access
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')
1058 1064
    def invalidate(self):
        """Drop all cached store-side state so it is reread on next access
        (dirstate excluded — see invalidatedirstate)."""
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()
1072 1078
1073 1079 def invalidateall(self):
1074 1080 '''Fully invalidates both store and non-store parts, causing the
1075 1081 subsequent operation to reread any outside changes.'''
1076 1082 # extension should hook this to invalidate its caches
1077 1083 self.invalidate()
1078 1084 self.invalidatedirstate()
1079 1085
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        """Acquire ``lockname`` via ``vfs``.

        First tries a non-blocking acquire; on contention either re-raises
        (when ``wait`` is false) or retries with the configured ui.timeout
        (default 600s). ``acquirefn`` runs once the lock is held.
        """
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l
1096 1102
1097 1103 def _afterlock(self, callback):
1098 1104 """add a callback to the current repository lock.
1099 1105
1100 1106 The callback will be executed on lock release."""
1101 1107 l = self._lockref and self._lockref()
1102 1108 if l:
1103 1109 l.postrelease.append(callback)
1104 1110 else:
1105 1111 callback()
1106 1112
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # reuse an existing held lock (re-entrant)
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # on release, refresh filecache entries so changes made while
            # locked are noticed (dirstate handled by wlock)
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1126 1132
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        # reuse an existing held lock (re-entrant)
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # flush or discard dirstate changes on final release
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
1149 1155
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Appends the file name to ``changelist`` when a new filelog revision
        is created, and returns the resulting file node (or the unchanged
        first-parent node when only flags changed).
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1230 1236
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there is nothing to
        commit.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, status)

            # nothing to commit: no file changes, branch unchanged, not
            # closing a branch head, not a merge
            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if node in self:
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
1402 1408
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Returns the new changeset node. With ``error`` set, IOErrors while
        committing individual files are re-raised even for ENOENT.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1486 1492
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()
1504 1510
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1538 1544
1539 1545 def walk(self, match, node=None):
1540 1546 '''
1541 1547 walk recursively through the directory tree or a given
1542 1548 changeset, finding all files matched by the match
1543 1549 function
1544 1550 '''
1545 1551 return self[node].walk(match)
1546 1552
1547 1553 def status(self, node1='.', node2=None, match=None,
1548 1554 ignored=False, clean=False, unknown=False,
1549 1555 listsubrepos=False):
1550 1556 '''a convenience method that calls node1.status(node2)'''
1551 1557 return self[node1].status(node2, match, ignored, clean, unknown,
1552 1558 listsubrepos)
1553 1559
1554 1560 def heads(self, start=None):
1555 1561 heads = self.changelog.heads(start)
1556 1562 # sort the output in rev descending order
1557 1563 return sorted(heads, key=self.changelog.rev, reverse=True)
1558 1564
1559 1565 def branchheads(self, branch=None, start=None, closed=False):
1560 1566 '''return a (possibly filtered) list of heads for the given branch
1561 1567
1562 1568 Heads are returned in topological order, from newest to oldest.
1563 1569 If branch is None, use the dirstate branch.
1564 1570 If start is not None, return only heads reachable from start.
1565 1571 If closed is True, return heads that are marked as closed as well.
1566 1572 '''
1567 1573 if branch is None:
1568 1574 branch = self[None].branch()
1569 1575 branches = self.branchmap()
1570 1576 if branch not in branches:
1571 1577 return []
1572 1578 # the cache returns heads ordered lowest to highest
1573 1579 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1574 1580 if start is not None:
1575 1581 # filter out the heads that cannot be reached from startrev
1576 1582 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1577 1583 bheads = [h for h in bheads if h in fbheads]
1578 1584 return bheads
1579 1585
1580 1586 def branches(self, nodes):
1581 1587 if not nodes:
1582 1588 nodes = [self.changelog.tip()]
1583 1589 b = []
1584 1590 for n in nodes:
1585 1591 t = n
1586 1592 while True:
1587 1593 p = self.changelog.parents(n)
1588 1594 if p[1] != nullid or p[0] == nullid:
1589 1595 b.append((t, n, p[0], p[1]))
1590 1596 break
1591 1597 n = p[0]
1592 1598 return b
1593 1599
    def between(self, pairs):
        """For each (top, bottom) pair, return sample nodes along the
        first-parent chain from top towards bottom, taken at exponentially
        increasing distances (1, 2, 4, 8, ... steps from top)."""
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    # record this node and double the gap to the next sample
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
1612 1618
1613 1619 def checkpush(self, pushop):
1614 1620 """Extensions can override this function if additional checks have
1615 1621 to be performed before pushing, or call it if they override push
1616 1622 command.
1617 1623 """
1618 1624 pass
1619 1625
1620 1626 @unfilteredpropertycache
1621 1627 def prepushoutgoinghooks(self):
1622 1628 """Return util.hooks consists of "(repo, remote, outgoing)"
1623 1629 functions, which are called before pushing changesets.
1624 1630 """
1625 1631 return util.hooks()
1626 1632
    def stream_in(self, remote, requirements):
        """Populate this repository by streaming raw store files from remote.

        requirements is the set of format requirements implied by the
        stream; it is extended with our existing non-format requirements
        and written out at the end.  Returns len(self.heads()) + 1.
        """
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            # first line of the stream is an integer status code
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            # second line: "<file count> <total byte count>"
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            # write all streamed files inside one transaction so a failed
            # clone does not leave a half-written store behind
            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    # per-file header: "<name>\0<size>"
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the inmemory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                # guard against a zero/negative clock delta in the rate math
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                # seed the local branch cache from the branchmap the remote
                # reported before the stream started
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible
                    # filter above served are unlikely to be fetch from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
1736 1742
1737 1743 def clone(self, remote, heads=[], stream=None):
1738 1744 '''clone remote repository.
1739 1745
1740 1746 keyword arguments:
1741 1747 heads: list of revs to clone (forces use of pull)
1742 1748 stream: use streaming clone if possible'''
1743 1749
1744 1750 # now, all clients that can request uncompressed clones can
1745 1751 # read repo formats supported by all servers that can serve
1746 1752 # them.
1747 1753
1748 1754 # if revlog format changes, client will have to check version
1749 1755 # and format flags on "stream" capability, and use
1750 1756 # uncompressed only if compatible.
1751 1757
1752 1758 if stream is None:
1753 1759 # if the server explicitly prefers to stream (for fast LANs)
1754 1760 stream = remote.capable('stream-preferred')
1755 1761
1756 1762 if stream and not heads:
1757 1763 # 'stream' means remote revlog format is revlogv1 only
1758 1764 if remote.capable('stream'):
1759 1765 self.stream_in(remote, set(('revlogv1',)))
1760 1766 else:
1761 1767 # otherwise, 'streamreqs' contains the remote revlog format
1762 1768 streamreqs = remote.capable('streamreqs')
1763 1769 if streamreqs:
1764 1770 streamreqs = set(streamreqs.split(','))
1765 1771 # if we support it, stream in and adjust our requirements
1766 1772 if not streamreqs - self.supportedformats:
1767 1773 self.stream_in(remote, streamreqs)
1768 1774
1769 1775 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1770 1776 try:
1771 1777 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1772 1778 ret = exchange.pull(self, remote, heads).cgresult
1773 1779 finally:
1774 1780 self.ui.restoreconfig(quiet)
1775 1781 return ret
1776 1782
    def pushkey(self, namespace, key, old, new):
        """Set *key* to *new* in *namespace* if it currently equals *old*.

        The prepushkey hook runs first and can veto the operation (its
        abort is reported and False is returned).  The pushkey hook is
        deferred until after the lock is released.  Returns the pushkey
        backend's result.
        """
        try:
            self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                      old=old, new=new)
        except error.HookAbort, exc:
            # hook vetoed the push: report and signal failure to the caller
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            # fired by _afterlock once the repo lock is released
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret
1793 1799
1794 1800 def listkeys(self, namespace):
1795 1801 self.hook('prelistkeys', throw=True, namespace=namespace)
1796 1802 self.ui.debug('listing keys for "%s"\n' % namespace)
1797 1803 values = pushkey.list(self, namespace)
1798 1804 self.hook('listkeys', namespace=namespace, values=values)
1799 1805 return values
1800 1806
1801 1807 def debugwireargs(self, one, two, three=None, four=None, five=None):
1802 1808 '''used to test argument passing over the wire'''
1803 1809 return "%s %s %s %s %s" % (one, two, three, four, five)
1804 1810
1805 1811 def savecommitmessage(self, text):
1806 1812 fp = self.opener('last-message.txt', 'wb')
1807 1813 try:
1808 1814 fp.write(text)
1809 1815 finally:
1810 1816 fp.close()
1811 1817 return self.pathto(fp.name[len(self.root) + 1:])
1812 1818
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a closure renaming each (vfs, src, dest) entry of *files*.

    The entries are snapshotted as tuples up front; a missing source
    (journal file never created) is silently skipped.
    """
    pending = [tuple(entry) for entry in files]
    def renameall():
        for vfs, src, dest in pending:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return renameall
1823 1829
def undoname(fn):
    """Map a journal file path to the corresponding undo file path."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    # only the first 'journal' occurrence is the prefix we rename
    return os.path.join(directory, name.replace('journal', 'undo', 1))
1828 1834
def instance(ui, path, create):
    """Open (or create) a localrepository for a local path or file:// URL."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
1831 1837
def islocal(path):
    """localrepo paths are local by definition."""
    return True
@@ -1,1676 +1,1676 b''
1 1 # subrepo.py - sub-repository handling for Mercurial
2 2 #
3 3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import copy
9 9 import errno, os, re, shutil, posixpath, sys
10 10 import xml.dom.minidom
11 11 import stat, subprocess, tarfile
12 12 from i18n import _
13 13 import config, util, node, error, cmdutil, scmutil, match as matchmod
14 14 import phases
15 15 import pathutil
16 16 import exchange
17 17 hg = None
18 18 propertycache = util.propertycache
19 19
20 20 nullstate = ('', '', 'empty')
21 21
def _expandedabspath(path):
    '''
    get a path or url and if it is a path expand it and return an absolute path
    '''
    expanded = util.urllocalpath(util.expandpath(path))
    parsed = util.url(expanded)
    if not parsed.scheme:
        # a plain filesystem path: normalize to an absolute form
        path = util.normpath(os.path.abspath(parsed.path))
    # URLs with a scheme are returned unchanged
    return path
31 31
def _getstorehashcachename(remotepath):
    '''get a unique filename for the store hash cache of a remote repository'''
    digest = util.sha1(_expandedabspath(remotepath)).hexdigest()
    # 12 hex chars are plenty to distinguish cache files
    return digest[0:12]
35 35
class SubrepoAbort(error.Abort):
    """Exception class used to avoid handling a subrepo error more than once"""
    def __init__(self, *args, **kw):
        error.Abort.__init__(self, *args, **kw)
        # remember origin details so outer layers can skip re-handling
        self.cause = kw.get('cause')
        self.subrepo = kw.get('subrepo')
42 42
def annotatesubrepoerror(func):
    """Decorator: annotate Abort errors raised by *func* with the subrepo path.

    A plain error.Abort is converted into a SubrepoAbort carrying the
    subrepo path and the original exc_info, so callers further out can
    recognize it as already handled.
    """
    def decoratedmethod(self, *args, **kargs):
        try:
            res = func(self, *args, **kargs)
        except SubrepoAbort, ex:
            # This exception has already been handled
            raise ex
        except error.Abort, ex:
            subrepo = subrelpath(self)
            errormsg = str(ex) + ' ' + _('(in subrepo %s)') % subrepo
            # avoid handling this exception by raising a SubrepoAbort exception
            raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo,
                               cause=sys.exc_info())
        return res
    return decoratedmethod
58 58
def state(ctx, ui):
    """return a state dict, mapping subrepo paths configured in .hgsub
    to tuple: (source from .hgsub, revision from .hgsubstate, kind
    (key in types dict))
    """
    p = config.config()
    def read(f, sections=None, remap=None):
        # parse config file *f* out of ctx into p; used recursively for
        # %include processing (passed as the include callback below)
        if f in ctx:
            try:
                data = ctx[f].data()
            except IOError, err:
                if err.errno != errno.ENOENT:
                    raise
                # handle missing subrepo spec files as removed
                ui.warn(_("warning: subrepo spec file %s not found\n") % f)
                return
            p.parse(f, data, sections, remap, read)
        else:
            raise util.Abort(_("subrepo spec file %s not found") % f)

    if '.hgsub' in ctx:
        read('.hgsub')

    # user-configured [subpaths] rules override those from .hgsub
    for path, src in ui.configitems('subpaths'):
        p.set('subpaths', path, src, ui.configsource('subpaths', path))

    # collect recorded revisions from .hgsubstate: "<rev> <path>" per line
    rev = {}
    if '.hgsubstate' in ctx:
        try:
            for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()):
                l = l.lstrip()
                if not l:
                    continue
                try:
                    revision, path = l.split(" ", 1)
                except ValueError:
                    raise util.Abort(_("invalid subrepository revision "
                                       "specifier in .hgsubstate line %d")
                                     % (i + 1))
                rev[path] = revision
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise

    def remap(src):
        # apply the [subpaths] rewrite rules (regex -> replacement) to src
        for pattern, repl in p.items('subpaths'):
            # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
            # does a string decode.
            repl = repl.encode('string-escape')
            # However, we still want to allow back references to go
            # through unharmed, so we turn r'\\1' into r'\1'. Again,
            # extra escapes are needed because re.sub string decodes.
            repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
            try:
                src = re.sub(pattern, repl, src, 1)
            except re.error, e:
                raise util.Abort(_("bad subrepository pattern in %s: %s")
                                 % (p.source('subpaths', pattern), e))
        return src

    state = {}
    for path, src in p[''].items():
        kind = 'hg'
        # an optional "[kind]" prefix selects the subrepo type
        if src.startswith('['):
            if ']' not in src:
                raise util.Abort(_('missing ] in subrepo source'))
            kind, src = src.split(']', 1)
            kind = kind[1:]
            src = src.lstrip() # strip any extra whitespace after ']'

        if not util.url(src).isabs():
            parent = _abssource(ctx._repo, abort=False)
            if parent:
                parent = util.url(parent)
                parent.path = posixpath.join(parent.path or '', src)
                parent.path = posixpath.normpath(parent.path)
                joined = str(parent)
                # Remap the full joined path and use it if it changes,
                # else remap the original source.
                remapped = remap(joined)
                if remapped == joined:
                    src = remap(src)
                else:
                    src = remapped

        src = remap(src)
        state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)

    return state
148 148
def writestate(repo, state):
    """rewrite .hgsubstate in (outer) repo with these subrepo states"""
    # one "<revision> <path>" line per subrepo, sorted by path
    lines = ['%s %s\n' % (substate[1], path)
             for path, substate in sorted(state.items())]
    repo.wwrite('.hgsubstate', ''.join(lines), '')
153 153
def submerge(repo, wctx, mctx, actx, overwrite):
    """delegated from merge.applyupdates: merging of .hgsubstate file
    in working context, merging context and ancestor context

    Decides, per subrepo, whether to keep the local state, fetch the
    remote state, merge, or remove, prompting the user where the sides
    diverge.  Writes and returns the merged substate mapping.
    """
    if mctx == actx: # backwards?
        actx = wctx.p1()
    s1 = wctx.substate
    s2 = mctx.substate
    sa = actx.substate
    sm = {}

    repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))

    def debug(s, msg, r=""):
        # r is a (source, revision, kind) state tuple when given
        if r:
            r = "%s:%s:%s" % r
        repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))

    # first pass: every subrepo present locally
    for s, l in sorted(s1.iteritems()):
        a = sa.get(s, nullstate)
        ld = l # local state with possible dirty flag for compares
        if wctx.sub(s).dirty():
            ld = (l[0], l[1] + "+")
        if wctx == actx: # overwrite
            a = ld

        if s in s2:
            r = s2[s]
            if ld == r or r == a: # no change or local is newer
                sm[s] = l
                continue
            elif ld == a: # other side changed
                debug(s, "other changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            elif ld[0] != r[0]: # sources differ
                if repo.ui.promptchoice(
                    _(' subrepository sources for %s differ\n'
                      'use (l)ocal source (%s) or (r)emote source (%s)?'
                      '$$ &Local $$ &Remote') % (s, l[0], r[0]), 0):
                    debug(s, "prompt changed, get", r)
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
            elif ld[1] == a[1]: # local side is unchanged
                debug(s, "other side changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            else:
                debug(s, "both sides changed")
                srepo = wctx.sub(s)
                option = repo.ui.promptchoice(
                    _(' subrepository %s diverged (local revision: %s, '
                      'remote revision: %s)\n'
                      '(M)erge, keep (l)ocal or keep (r)emote?'
                      '$$ &Merge $$ &Local $$ &Remote')
                    % (s, srepo.shortid(l[1]), srepo.shortid(r[1])), 0)
                if option == 0:
                    wctx.sub(s).merge(r)
                    sm[s] = l
                    debug(s, "merge with", r)
                elif option == 1:
                    sm[s] = l
                    debug(s, "keep local subrepo revision", l)
                else:
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
                    debug(s, "get remote subrepo revision", r)
        elif ld == a: # remote removed, local unchanged
            debug(s, "remote removed, remove")
            wctx.sub(s).remove()
        elif a == nullstate: # not present in remote or ancestor
            debug(s, "local added, keep")
            sm[s] = l
            continue
        else:
            if repo.ui.promptchoice(
                _(' local changed subrepository %s which remote removed\n'
                  'use (c)hanged version or (d)elete?'
                  '$$ &Changed $$ &Delete') % s, 0):
                debug(s, "prompt remove")
                wctx.sub(s).remove()

    # second pass: subrepos only present on the remote side
    for s, r in sorted(s2.items()):
        if s in s1:
            continue
        elif s not in sa:
            debug(s, "remote added, get", r)
            mctx.sub(s).get(r)
            sm[s] = r
        elif r != sa[s]:
            if repo.ui.promptchoice(
                _(' remote changed subrepository %s which local removed\n'
                  'use (c)hanged version or (d)elete?'
                  '$$ &Changed $$ &Delete') % s, 0) == 0:
                debug(s, "prompt recreate", r)
                wctx.sub(s).get(r)
                sm[s] = r

    # record merged .hgsubstate
    writestate(repo, sm)
    return sm
254 254
def _updateprompt(ui, sub, dirty, local, remote):
    """Ask the user whether to keep the local or remote subrepo source."""
    # pick the message variant first, then format it once
    if dirty:
        template = _(' subrepository sources for %s differ\n'
                     'use (l)ocal source (%s) or (r)emote source (%s)?'
                     '$$ &Local $$ &Remote')
    else:
        template = _(' subrepository sources for %s differ (in checked out '
                     'version)\n'
                     'use (l)ocal source (%s) or (r)emote source (%s)?'
                     '$$ &Local $$ &Remote')
    return ui.promptchoice(template % (subrelpath(sub), local, remote), 0)
268 268
def reporelpath(repo):
    """return path to this (sub)repo as seen from outermost repo"""
    outer = repo
    # climb the _subparent chain up to the outermost repository
    while util.safehasattr(outer, '_subparent'):
        outer = outer._subparent
    return repo.root[len(pathutil.normasprefix(outer.root)):]
275 275
def subrelpath(sub):
    """return path to this subrepo as seen from outermost repo"""
    if util.safehasattr(sub, '_relpath'):
        return sub._relpath
    if util.safehasattr(sub, '_repo'):
        return reporelpath(sub._repo)
    # no repo object available (e.g. lightweight backends): stored path
    return sub._path
283 283
def _abssource(repo, push=False, abort=True):
    """return pull/push path of repo - either based on parent repo .hgsub info
    or on the top repo config. Abort or return None if no source found."""
    # Fix: the rendered source carried both the pre- and post-change test
    # for a shared repository; keep only the repo.shared() API call.
    if util.safehasattr(repo, '_subparent'):
        # subrepo: resolve relative sources against the parent's source
        source = util.url(repo._subsource)
        if source.isabs():
            return str(source)
        source.path = posixpath.normpath(source.path)
        parent = _abssource(repo._subparent, push, abort=False)
        if parent:
            parent = util.url(util.pconvert(parent))
            parent.path = posixpath.join(parent.path or '', source.path)
            parent.path = posixpath.normpath(parent.path)
            return str(parent)
    else: # recursion reached top repo
        if util.safehasattr(repo, '_subtoppath'):
            return repo._subtoppath
        if push and repo.ui.config('paths', 'default-push'):
            return repo.ui.config('paths', 'default-push')
        if repo.ui.config('paths', 'default'):
            return repo.ui.config('paths', 'default')
        if repo.shared():
            # chop off the .hg component to get the default path form
            return os.path.dirname(repo.sharedpath)
    if abort:
        raise util.Abort(_("default path for subrepository not found"))
310 310
def _sanitize(ui, path, ignore):
    """Walk *path*, pruning directories named *ignore* and deleting any
    'hgrc' file found inside a '.hg' directory (it could carry hostile
    configuration)."""
    for dirname, dirs, names in os.walk(path):
        # prune the ignored directory (at most one match) from the walk
        for idx, sub in enumerate(dirs):
            if sub.lower() == ignore:
                del dirs[idx]
                break
        if os.path.basename(dirname).lower() != '.hg':
            continue
        for fname in names:
            if fname.lower() == 'hgrc':
                ui.warn(_("warning: removing potentially hostile 'hgrc' "
                          "in '%s'\n") % dirname)
                os.unlink(os.path.join(dirname, fname))
324 324
def subrepo(ctx, path):
    """return instance of the right subrepo class for subrepo in path"""
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    import hg as h
    hg = h

    # refuse paths that would escape the repository
    pathutil.pathauditor(ctx._repo.root)(path)
    state = ctx.substate[path]
    # state is (source, revision, kind); kind selects the backend class
    if state[2] not in types:
        raise util.Abort(_('unknown subrepo type %s') % state[2])
    return types[state[2]](ctx, path, state[:2])
340 340
def newcommitphase(ui, ctx):
    """Pick the phase for a new commit of *ctx*, honouring the
    phases.checksubrepos policy with respect to subrepo phases."""
    commitphase = phases.newcommitphase(ui)
    substate = getattr(ctx, "substate", None)
    if not substate:
        # no subrepos: nothing to reconcile
        return commitphase
    check = ui.config('phases', 'checksubrepos', 'follow')
    if check not in ('ignore', 'follow', 'abort'):
        raise util.Abort(_('invalid phases.checksubrepos configuration: %s')
                         % (check))
    if check == 'ignore':
        return commitphase
    # find the "highest" (least public) phase among the subrepos
    maxphase, maxsub = phases.public, None
    for subpath in sorted(substate):
        subphase = ctx.sub(subpath).phase(substate[subpath][1])
        if maxphase < subphase:
            maxphase, maxsub = subphase, subpath
    if commitphase < maxphase:
        if check == 'abort':
            raise util.Abort(_("can't commit in %s phase"
                               " conflicting %s from subrepository %s") %
                             (phases.phasenames[commitphase],
                              phases.phasenames[maxphase], maxsub))
        ui.warn(_("warning: changes are committed in"
                  " %s phase from subrepository %s\n") %
                (phases.phasenames[maxphase], maxsub))
        return maxphase
    return commitphase
371 371
# subrepo classes need to implement the following abstract class:

class abstractsubrepo(object):
    """Base class defining the interface every subrepo backend implements.

    Methods raising NotImplementedError are mandatory for subclasses;
    the rest provide conservative defaults (empty results or a non-zero
    "unsupported" return code).
    """

    def __init__(self, ui):
        self.ui = ui

    def storeclean(self, path):
        """
        returns true if the repository has not changed since it was last
        cloned from or pushed to a given repository.
        """
        return False

    def dirty(self, ignoreupdate=False):
        """returns true if the dirstate of the subrepo is dirty or does not
        match current stored state. If ignoreupdate is true, only check
        whether the subrepo has uncommitted changes in its dirstate.
        """
        raise NotImplementedError

    def basestate(self):
        """current working directory base state, disregarding .hgsubstate
        state and working directory modifications"""
        raise NotImplementedError

    def checknested(self, path):
        """check if path is a subrepository within this repository"""
        return False

    def commit(self, text, user, date):
        """commit the current changes to the subrepo with the given
        log message. Use given user and date if possible. Return the
        new state of the subrepo.
        """
        raise NotImplementedError

    def phase(self, state):
        """returns phase of specified state in the subrepository.
        """
        return phases.public

    def remove(self):
        """remove the subrepo

        (should verify the dirstate is not dirty first)
        """
        raise NotImplementedError

    def get(self, state, overwrite=False):
        """run whatever commands are needed to put the subrepo into
        this state
        """
        raise NotImplementedError

    def merge(self, state):
        """merge currently-saved state with the new state."""
        raise NotImplementedError

    def push(self, opts):
        """perform whatever action is analogous to 'hg push'

        This may be a no-op on some systems.
        """
        raise NotImplementedError

    def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
        # default: nothing added
        return []

    def addremove(self, matcher, prefix, opts, dry_run, similarity):
        # default: unsupported; warn and report failure (exit code 1)
        self.ui.warn("%s: %s" % (prefix, _("addremove is not supported")))
        return 1

    def cat(self, match, prefix, **opts):
        # default: unsupported (non-zero exit code)
        return 1

    def status(self, rev2, **opts):
        # default: empty status
        return scmutil.status([], [], [], [], [], [], [])

    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        # default: no diff output
        pass

    def outgoing(self, ui, dest, opts):
        # default: unsupported (non-zero exit code)
        return 1

    def incoming(self, ui, source, opts):
        # default: unsupported (non-zero exit code)
        return 1

    def files(self):
        """return filename iterator"""
        raise NotImplementedError

    def filedata(self, name):
        """return file data"""
        raise NotImplementedError

    def fileflags(self, name):
        """return file flags"""
        return ''

    def archive(self, archiver, prefix, match=None):
        """add this subrepo's files (optionally filtered by *match*) to
        *archiver* under *prefix*; returns the number of files archived"""
        if match is not None:
            files = [f for f in self.files() if match(f)]
        else:
            files = self.files()
        total = len(files)
        relpath = subrelpath(self)
        self.ui.progress(_('archiving (%s)') % relpath, 0,
                         unit=_('files'), total=total)
        for i, name in enumerate(files):
            flags = self.fileflags(name)
            # executable bit from the 'x' flag; 'l' marks a symlink
            mode = 'x' in flags and 0755 or 0644
            symlink = 'l' in flags
            archiver.addfile(os.path.join(prefix, self._path, name),
                             mode, symlink, self.filedata(name))
            self.ui.progress(_('archiving (%s)') % relpath, i + 1,
                             unit=_('files'), total=total)
        self.ui.progress(_('archiving (%s)') % relpath, None)
        return total

    def walk(self, match):
        '''
        walk recursively through the directory tree, finding all files
        matched by the match function
        '''
        pass

    def forget(self, match, prefix):
        # default: nothing forgotten (forgotten, warnings)
        return ([], [])

    def removefiles(self, matcher, prefix, after, force, subrepos):
        """remove the matched files from the subrepository and the filesystem,
        possibly by force and/or after the file has been removed from the
        filesystem. Return 0 on success, 1 on any warning.
        """
        return 1

    def revert(self, substate, *pats, **opts):
        # default: unsupported; warn and return no reverted names
        self.ui.warn('%s: reverting %s subrepos is unsupported\n' \
                     % (substate[0], substate[2]))
        return []

    def shortid(self, revid):
        # default: the full revision id is its own short form
        return revid
516 516
517 517 class hgsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state):
        """Open (creating on first use) the hg subrepo at *path* of ctx's
        repository; *state* is the (source, revision) pair from
        .hgsub/.hgsubstate."""
        super(hgsubrepo, self).__init__(ctx._repo.ui)
        self._path = path
        self._state = state
        r = ctx._repo
        root = r.wjoin(path)
        # no .hg directory yet means the subrepo must be created
        create = not r.wvfs.exists('%s/.hg' % path)
        self._repo = hg.repository(r.baseui, root, create=create)
        self.ui = self._repo.ui
        # propagate selected parent-repo settings into the subrepo's ui
        for s, k in [('ui', 'commitsubrepos')]:
            v = r.ui.config(s, k)
            if v:
                self.ui.setconfig(s, k, v, 'subrepo')
        # mark this repo object as being driven as a subrepo
        self.ui.setconfig('ui', '_usedassubrepo', 'True', 'subrepo')
        self._initrepo(r, state[0], create)
533 533
    def storeclean(self, path):
        """True if the store is unchanged versus remote *path* (see
        _storeclean); takes the repo lock for a consistent snapshot."""
        lock = self._repo.lock()
        try:
            return self._storeclean(path)
        finally:
            lock.release()
540 540
    def _storeclean(self, path):
        """Compare the cached store hash for *path* with the current one.

        The comparison is entry by entry; any mismatch, or the two hash
        listings having different lengths, means the store changed.
        """
        clean = True
        itercache = self._calcstorehash(path)
        try:
            for filehash in self._readstorehashcache(path):
                if filehash != itercache.next():
                    clean = False
                    break
        except StopIteration:
            # the cached and current pull states have a different size
            clean = False
        if clean:
            # cache exhausted first; make sure the live iterator is too
            try:
                itercache.next()
                # the cached and current pull states have a different size
                clean = False
            except StopIteration:
                pass
        return clean
560 560
    def _calcstorehash(self, remotepath):
        '''calculate a unique "store hash"

        This method is used to to detect when there are changes that may
        require a push to a given remote path.'''
        # sort the files that will be hashed in increasing (likely) file size
        filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
        # first line records which remote this hash listing belongs to
        yield '# %s\n' % _expandedabspath(remotepath)
        vfs = self._repo.vfs
        for relname in filelist:
            # one "name = sha1" line per tracked store file
            filehash = util.sha1(vfs.tryread(relname)).hexdigest()
            yield '%s = %s\n' % (relname, filehash)
573 573
    @propertycache
    def _cachestorehashvfs(self):
        # vfs rooted at .hg/cache/storehash, holding one hash file per remote
        return scmutil.vfs(self._repo.join('cache/storehash'))
577 577
    def _readstorehashcache(self, remotepath):
        '''read the store hash cache for a given remote repository'''
        cachefile = _getstorehashcachename(remotepath)
        # NOTE(review): tryreadlines presumably returns an empty list when
        # no cache exists yet — confirm against scmutil.vfs
        return self._cachestorehashvfs.tryreadlines(cachefile, 'r')
582 582
583 583 def _cachestorehash(self, remotepath):
584 584 '''cache the current store hash
585 585
586 586 Each remote repo requires its own store hash cache, because a subrepo
587 587 store may be "clean" versus a given remote repo, but not versus another
588 588 '''
589 589 cachefile = _getstorehashcachename(remotepath)
590 590 lock = self._repo.lock()
591 591 try:
592 592 storehash = list(self._calcstorehash(remotepath))
593 593 vfs = self._cachestorehashvfs
594 594 vfs.writelines(cachefile, storehash, mode='w', notindexed=True)
595 595 finally:
596 596 lock.release()
597 597
    @annotatesubrepoerror
    def _initrepo(self, parentrepo, source, create):
        """Link the subrepo to its parent; on creation, also write an hgrc
        with default/default-push paths derived from the parent's source."""
        self._repo._subparent = parentrepo
        self._repo._subsource = source

        if create:
            lines = ['[paths]\n']

            def addpathconfig(key, value):
                # record the path both in the hgrc text and the live config
                if value:
                    lines.append('%s = %s\n' % (key, value))
                    self.ui.setconfig('paths', key, value, 'subrepo')

            defpath = _abssource(self._repo, abort=False)
            defpushpath = _abssource(self._repo, True, abort=False)
            addpathconfig('default', defpath)
            # only record default-push when it actually differs
            if defpath != defpushpath:
                addpathconfig('default-push', defpushpath)

            fp = self._repo.opener("hgrc", "w", text=True)
            try:
                fp.write(''.join(lines))
            finally:
                fp.close()
622 622
    @annotatesubrepoerror
    def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
        """delegate 'add' into the subrepo, prefixing reported paths"""
        return cmdutil.add(ui, self._repo, match, dryrun, listsubrepos,
                           os.path.join(prefix, self._path), explicitonly)
627 627
    def addremove(self, m, prefix, opts, dry_run, similarity):
        """delegate addremove into the subrepo, recursing into its subrepos"""
        # In the same way as sub directories are processed, once in a subrepo,
        # always entry any of its subrepos. Don't corrupt the options that will
        # be used to process sibling subrepos however.
        opts = copy.copy(opts)
        opts['subrepos'] = True
        return scmutil.addremove(self._repo, m,
                                 os.path.join(prefix, self._path), opts,
                                 dry_run, similarity)
637 637
638 638 @annotatesubrepoerror
639 639 def cat(self, match, prefix, **opts):
640 640 rev = self._state[1]
641 641 ctx = self._repo[rev]
642 642 return cmdutil.cat(self.ui, self._repo, ctx, match, prefix, **opts)
643 643
    @annotatesubrepoerror
    def status(self, rev2, **opts):
        """status between the subrepo's recorded revision and *rev2*;
        lookup errors degrade to an empty status plus a warning"""
        try:
            rev1 = self._state[1]
            ctx1 = self._repo[rev1]
            ctx2 = self._repo[rev2]
            return self._repo.status(ctx1, ctx2, **opts)
        except error.RepoLookupError, inst:
            self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                         % (inst, subrelpath(self)))
            return scmutil.status([], [], [], [], [], [], [])
655 655
    @annotatesubrepoerror
    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        """Show a diff between the recorded revision and ``node2``.

        Lookup errors are reported as warnings rather than aborting.
        """
        try:
            node1 = node.bin(self._state[1])
            # We currently expect node2 to come from substate and be
            # in hex format
            if node2 is not None:
                node2 = node.bin(node2)
            cmdutil.diffordiffstat(ui, self._repo, diffopts,
                                   node1, node2, match,
                                   prefix=posixpath.join(prefix, self._path),
                                   listsubrepos=True, **opts)
        except error.RepoLookupError, inst:
            self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                         % (inst, subrelpath(self)))
671 671
    @annotatesubrepoerror
    def archive(self, archiver, prefix, match=None):
        """Archive the subrepo at its recorded revision, recursing into
        nested subrepos.  Returns the total number of files archived."""
        # make sure the recorded revision is available locally first
        self._get(self._state + ('hg',))
        total = abstractsubrepo.archive(self, archiver, prefix, match)
        rev = self._state[1]
        ctx = self._repo[rev]
        for subpath in ctx.substate:
            s = subrepo(ctx, subpath)
            submatch = matchmod.narrowmatcher(subpath, match)
            total += s.archive(
                archiver, os.path.join(prefix, self._path), submatch)
        return total
684 684
685 685 @annotatesubrepoerror
686 686 def dirty(self, ignoreupdate=False):
687 687 r = self._state[1]
688 688 if r == '' and not ignoreupdate: # no state recorded
689 689 return True
690 690 w = self._repo[None]
691 691 if r != w.p1().hex() and not ignoreupdate:
692 692 # different version checked out
693 693 return True
694 694 return w.dirty() # working directory changed
695 695
696 696 def basestate(self):
697 697 return self._repo['.'].hex()
698 698
699 699 def checknested(self, path):
700 700 return self._repo._checknested(self._repo.wjoin(path))
701 701
    @annotatesubrepoerror
    def commit(self, text, user, date):
        """Commit the subrepo working directory; return the resulting (or,
        if nothing was committed, the current) hex node."""
        # don't bother committing in the subrepo if it's only been
        # updated
        if not self.dirty(True):
            return self._repo['.'].hex()
        self.ui.debug("committing subrepo %s\n" % subrelpath(self))
        n = self._repo.commit(text, user, date)
        if not n:
            return self._repo['.'].hex() # different version checked out
        return node.hex(n)
713 713
714 714 @annotatesubrepoerror
715 715 def phase(self, state):
716 716 return self._repo[state].phase()
717 717
    @annotatesubrepoerror
    def remove(self):
        """Empty the subrepo working directory while keeping its store."""
        # we can't fully delete the repository as it may contain
        # local-only history
        self.ui.note(_('removing subrepo %s\n') % subrelpath(self))
        # update to the null revision, removing the working copy files
        hg.clean(self._repo, node.nullid, False)
724 724
    def _get(self, state):
        """Make the revision recorded in ``state`` available locally.

        Clones from the subrepo source when the local repo is empty,
        otherwise pulls from it.  Returns True when the revision was
        already present (no network access needed), False otherwise.
        """
        source, revision, kind = state
        if revision in self._repo.unfiltered():
            return True
        self._repo._subsource = source
        srcurl = _abssource(self._repo)
        other = hg.peer(self._repo, {}, srcurl)
        if len(self._repo) == 0:
            self.ui.status(_('cloning subrepo %s from %s\n')
                           % (subrelpath(self), srcurl))
            parentrepo = self._repo._subparent
            # replace the empty on-disk repo with a real clone of the source
            shutil.rmtree(self._repo.path)
            other, cloned = hg.clone(self._repo._subparent.baseui, {},
                                     other, self._repo.root,
                                     update=False)
            self._repo = cloned.local()
            self._initrepo(parentrepo, source, create=True)
            self._cachestorehash(srcurl)
        else:
            self.ui.status(_('pulling subrepo %s from %s\n')
                           % (subrelpath(self), srcurl))
            cleansub = self.storeclean(srcurl)
            exchange.pull(self._repo, other)
            if cleansub:
                # keep the repo clean after pull
                self._cachestorehash(srcurl)
        return False
752 752
    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        """Update the subrepo working directory to the revision in ``state``.

        Fetches the revision first when needed; warns (but proceeds through
        the unfiltered repo view) when the target revision is hidden.
        """
        inrepo = self._get(state)
        source, revision, kind = state
        repo = self._repo
        repo.ui.debug("getting subrepo %s\n" % self._path)
        if inrepo:
            # the revision may be hidden; then the update must go through
            # the unfiltered view of the repository
            urepo = repo.unfiltered()
            ctx = urepo[revision]
            if ctx.hidden():
                urepo.ui.warn(
                    _('revision %s in subrepo %s is hidden\n') \
                    % (revision[0:12], self._path))
                repo = urepo
        hg.updaterepo(repo, revision, overwrite)
768 768
    @annotatesubrepoerror
    def merge(self, state):
        """Merge the revision in ``state`` into the working directory.

        Updates when the working copy parent is an ancestor on the same
        branch, skips when already a descendant, merges otherwise; a dirty
        working copy triggers a prompt first when a real merge is needed.
        """
        self._get(state)
        cur = self._repo['.']
        dst = self._repo[state[1]]
        anc = dst.ancestor(cur)

        def mergefunc():
            if anc == cur and dst.branch() == cur.branch():
                self.ui.debug("updating subrepo %s\n" % subrelpath(self))
                hg.update(self._repo, state[1])
            elif anc == dst:
                self.ui.debug("skipping subrepo %s\n" % subrelpath(self))
            else:
                self.ui.debug("merging subrepo %s\n" % subrelpath(self))
                hg.merge(self._repo, state[1], remind=False)

        wctx = self._repo[None]
        if self.dirty():
            if anc != dst:
                if _updateprompt(self.ui, self, wctx.dirty(), cur, dst):
                    mergefunc()
            else:
                mergefunc()
        else:
            mergefunc()
795 795
    @annotatesubrepoerror
    def push(self, opts):
        """Push this subrepo to its default-push path.

        Nested subrepos are pushed first, depth-first.  Returns False when
        a nested push fails, None when the store is unchanged since the
        last push (and no --force), otherwise the push's cgresult.
        """
        force = opts.get('force')
        newbranch = opts.get('new_branch')
        ssh = opts.get('ssh')

        # push subrepos depth-first for coherent ordering
        c = self._repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            if c.sub(s).push(opts) == 0:
                return False

        dsturl = _abssource(self._repo, True)
        if not force:
            if self.storeclean(dsturl):
                self.ui.status(
                    _('no changes made to subrepo %s since last push to %s\n')
                    % (subrelpath(self), dsturl))
                return None
        self.ui.status(_('pushing subrepo %s to %s\n') %
                       (subrelpath(self), dsturl))
        other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
        res = exchange.push(self._repo, other, force, newbranch=newbranch)

        # the repo is now clean
        self._cachestorehash(dsturl)
        return res.cgresult
824 824
825 825 @annotatesubrepoerror
826 826 def outgoing(self, ui, dest, opts):
827 827 return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
828 828
829 829 @annotatesubrepoerror
830 830 def incoming(self, ui, source, opts):
831 831 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
832 832
833 833 @annotatesubrepoerror
834 834 def files(self):
835 835 rev = self._state[1]
836 836 ctx = self._repo[rev]
837 837 return ctx.manifest()
838 838
839 839 def filedata(self, name):
840 840 rev = self._state[1]
841 841 return self._repo[rev][name].data()
842 842
843 843 def fileflags(self, name):
844 844 rev = self._state[1]
845 845 ctx = self._repo[rev]
846 846 return ctx.flags(name)
847 847
848 848 def walk(self, match):
849 849 ctx = self._repo[None]
850 850 return ctx.walk(match)
851 851
852 852 @annotatesubrepoerror
853 853 def forget(self, match, prefix):
854 854 return cmdutil.forget(self.ui, self._repo, match,
855 855 os.path.join(prefix, self._path), True)
856 856
857 857 @annotatesubrepoerror
858 858 def removefiles(self, matcher, prefix, after, force, subrepos):
859 859 return cmdutil.remove(self.ui, self._repo, matcher,
860 860 os.path.join(prefix, self._path), after, force,
861 861 subrepos)
862 862
    @annotatesubrepoerror
    def revert(self, substate, *pats, **opts):
        """Revert the subrepo to the state recorded in ``substate``."""
        # reverting a subrepo is a 2 step process:
        # 1. if the no_backup is not set, revert all modified
        # files inside the subrepo
        # 2. update the subrepo to the revision specified in
        # the corresponding substate dictionary
        self.ui.status(_('reverting subrepo %s\n') % substate[0])
        if not opts.get('no_backup'):
            # Revert all files on the subrepo, creating backups
            # Note that this will not recursively revert subrepos
            # We could do it if there was a set:subrepos() predicate
            opts = opts.copy()
            opts['date'] = None
            opts['rev'] = substate[1]

            pats = []
            if not opts.get('all'):
                pats = ['set:modified()']
            self.filerevert(*pats, **opts)

        # Update the repo to the revision specified in the given substate
        self.get(substate, overwrite=True)
886 886
887 887 def filerevert(self, *pats, **opts):
888 888 ctx = self._repo[opts['rev']]
889 889 parents = self._repo.dirstate.parents()
890 890 if opts.get('all'):
891 891 pats = ['set:modified()']
892 892 else:
893 893 pats = []
894 894 cmdutil.revert(self.ui, self._repo, ctx, parents, *pats, **opts)
895 895
896 896 def shortid(self, revid):
897 897 return revid[:12]
898 898
class svnsubrepo(abstractsubrepo):
    """Subrepository backend backed by a Subversion working copy.

    All operations shell out to the ``svn`` command line client.
    """
    def __init__(self, ctx, path, state):
        super(svnsubrepo, self).__init__(ctx._repo.ui)
        self._path = path
        self._state = state
        self._ctx = ctx
        self._exe = util.findexe('svn')
        if not self._exe:
            raise util.Abort(_("'svn' executable not found for subrepo '%s'")
                             % self._path)

    def _svncommand(self, commands, filename='', failok=False):
        """Run svn with ``commands`` and return (stdout, stderr).

        ``filename`` (None to omit) is resolved inside the subrepo and
        appended to the command line.  Unless ``failok`` is set, a nonzero
        exit aborts and any stderr output is warned about.
        """
        cmd = [self._exe]
        extrakw = {}
        if not self.ui.interactive():
            # Making stdin be a pipe should prevent svn from behaving
            # interactively even if we can't pass --non-interactive.
            extrakw['stdin'] = subprocess.PIPE
            # Starting in svn 1.5 --non-interactive is a global flag
            # instead of being per-command, but we need to support 1.4 so
            # we have to be intelligent about what commands take
            # --non-interactive.
            if commands[0] in ('update', 'checkout', 'commit'):
                cmd.append('--non-interactive')
        cmd.extend(commands)
        if filename is not None:
            path = os.path.join(self._ctx._repo.origroot, self._path, filename)
            cmd.append(path)
        env = dict(os.environ)
        # Avoid localized output, preserve current locale for everything else.
        lc_all = env.get('LC_ALL')
        if lc_all:
            env['LANG'] = lc_all
            del env['LC_ALL']
        env['LC_MESSAGES'] = 'C'
        p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True, env=env, **extrakw)
        stdout, stderr = p.communicate()
        stderr = stderr.strip()
        if not failok:
            if p.returncode:
                raise util.Abort(stderr or 'exited with code %d' % p.returncode)
            if stderr:
                self.ui.warn(stderr + '\n')
        return stdout, stderr

    @propertycache
    def _svnversion(self):
        # (major, minor) tuple of the svn client, parsed from `svn --version`
        output, err = self._svncommand(['--version', '--quiet'], filename=None)
        m = re.search(r'^(\d+)\.(\d+)', output)
        if not m:
            raise util.Abort(_('cannot retrieve svn tool version'))
        return (int(m.group(1)), int(m.group(2)))

    def _wcrevs(self):
        # Get the working directory revision as well as the last
        # commit revision so we can compare the subrepo state with
        # both. We used to store the working directory one.
        output, err = self._svncommand(['info', '--xml'])
        doc = xml.dom.minidom.parseString(output)
        entries = doc.getElementsByTagName('entry')
        lastrev, rev = '0', '0'
        if entries:
            rev = str(entries[0].getAttribute('revision')) or '0'
            commits = entries[0].getElementsByTagName('commit')
            if commits:
                lastrev = str(commits[0].getAttribute('revision')) or '0'
        return (lastrev, rev)

    def _wcrev(self):
        # last-commit revision of the working copy (see _wcrevs)
        return self._wcrevs()[0]

    def _wcchanged(self):
        """Return (changes, extchanges, missing) where changes is True
        if the working directory was changed, extchanges is
        True if any of these changes concern an external entry and missing
        is True if any change is a missing entry.
        """
        output, err = self._svncommand(['status', '--xml'])
        externals, changes, missing = [], [], []
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            s = e.getElementsByTagName('wc-status')
            if not s:
                continue
            item = s[0].getAttribute('item')
            props = s[0].getAttribute('props')
            path = e.getAttribute('path')
            if item == 'external':
                externals.append(path)
            elif item == 'missing':
                missing.append(path)
            if (item not in ('', 'normal', 'unversioned', 'external')
                or props not in ('', 'none', 'normal')):
                changes.append(path)
        # a change below an external entry counts as an external change
        for path in changes:
            for ext in externals:
                if path == ext or path.startswith(ext + os.sep):
                    return True, True, bool(missing)
        return bool(changes), False, bool(missing)

    def dirty(self, ignoreupdate=False):
        """True when the working copy is modified or (unless
        ``ignoreupdate``) checked out at a different revision."""
        if not self._wcchanged()[0]:
            if self._state[1] in self._wcrevs() or ignoreupdate:
                return False
        return True

    def basestate(self):
        """Return the revision to record in .hgsubstate."""
        lastrev, rev = self._wcrevs()
        if lastrev != rev:
            # Last committed rev is not the same as rev. We would
            # like to take lastrev but we do not know if the subrepo
            # URL exists at lastrev. Test it and fallback to rev it
            # is not there.
            try:
                self._svncommand(['list', '%s@%s' % (self._state[0], lastrev)])
                return lastrev
            except error.Abort:
                pass
        return rev

    @annotatesubrepoerror
    def commit(self, text, user, date):
        """Commit working copy changes; return the new revision number."""
        # user and date are out of our hands since svn is centralized
        changed, extchanged, missing = self._wcchanged()
        if not changed:
            return self.basestate()
        if extchanged:
            # Do not try to commit externals
            raise util.Abort(_('cannot commit svn externals'))
        if missing:
            # svn can commit with missing entries but aborting like hg
            # seems a better approach.
            raise util.Abort(_('cannot commit missing svn entries'))
        commitinfo, err = self._svncommand(['commit', '-m', text])
        self.ui.status(commitinfo)
        newrev = re.search('Committed revision ([0-9]+).', commitinfo)
        if not newrev:
            if not commitinfo.strip():
                # Sometimes, our definition of "changed" differs from
                # svn one. For instance, svn ignores missing files
                # when committing. If there are only missing files, no
                # commit is made, no output and no error code.
                raise util.Abort(_('failed to commit svn changes'))
            raise util.Abort(commitinfo.splitlines()[-1])
        newrev = newrev.groups()[0]
        self.ui.status(self._svncommand(['update', '-r', newrev])[0])
        return newrev

    @annotatesubrepoerror
    def remove(self):
        """Delete the working copy from disk (refusing when it is dirty)."""
        if self.dirty():
            self.ui.warn(_('not removing repo %s because '
                           'it has changes.\n') % self._path)
            return
        self.ui.note(_('removing subrepo %s\n') % self._path)

        def onerror(function, path, excinfo):
            if function is not os.remove:
                raise
            # read-only files cannot be unlinked under Windows
            s = os.stat(path)
            if (s.st_mode & stat.S_IWRITE) != 0:
                raise
            os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
            os.remove(path)

        path = self._ctx._repo.wjoin(self._path)
        shutil.rmtree(path, onerror=onerror)
        try:
            os.removedirs(os.path.dirname(path))
        except OSError:
            pass

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        """Check out the URL@revision recorded in ``state``."""
        if overwrite:
            self._svncommand(['revert', '--recursive'])
        args = ['checkout']
        if self._svnversion >= (1, 5):
            args.append('--force')
        # The revision must be specified at the end of the URL to properly
        # update to a directory which has since been deleted and recreated.
        args.append('%s@%s' % (state[0], state[1]))
        status, err = self._svncommand(args, failok=True)
        _sanitize(self.ui, self._ctx._repo.wjoin(self._path), '.svn')
        if not re.search('Checked out revision [0-9]+.', status):
            if ('is already a working copy for a different URL' in err
                and (self._wcchanged()[:2] == (False, False))):
                # obstructed but clean working copy, so just blow it away.
                self.remove()
                self.get(state, overwrite=False)
                return
            raise util.Abort((status or err).splitlines()[-1])
        self.ui.status(status)

    @annotatesubrepoerror
    def merge(self, state):
        """'Merge' by checking out the target revision, prompting first
        when the working copy is dirty (svn cannot merge locally)."""
        old = self._state[1]
        new = state[1]
        wcrev = self._wcrev()
        if new != wcrev:
            dirty = old == wcrev or self._wcchanged()[0]
            if _updateprompt(self.ui, self, dirty, wcrev, new):
                self.get(state, False)

    def push(self, opts):
        # push is a no-op for SVN
        return True

    @annotatesubrepoerror
    def files(self):
        """Return the list of versioned files (utf-8 encoded paths)."""
        output = self._svncommand(['list', '--recursive', '--xml'])[0]
        doc = xml.dom.minidom.parseString(output)
        paths = []
        for e in doc.getElementsByTagName('entry'):
            kind = str(e.getAttribute('kind'))
            if kind != 'file':
                continue
            name = ''.join(c.data for c
                           in e.getElementsByTagName('name')[0].childNodes
                           if c.nodeType == c.TEXT_NODE)
            paths.append(name.encode('utf-8'))
        return paths

    def filedata(self, name):
        """Return the contents of file ``name`` from the working copy."""
        return self._svncommand(['cat'], name)[0]
1127 1127
1128 1128
1129 1129 class gitsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state):
        super(gitsubrepo, self).__init__(ctx._repo.ui)
        self._state = state
        self._ctx = ctx
        self._path = path
        # path of the subrepo relative to the topmost parent repository
        self._relpath = os.path.join(reporelpath(ctx._repo), path)
        # absolute path of the subrepo checkout on disk
        self._abspath = ctx._repo.wjoin(path)
        self._subparent = ctx._repo
        # locate git and validate its version before doing anything else
        self._ensuregit()
1139 1139
    def _ensuregit(self):
        """Locate a usable git executable and check its version.

        On Windows, fall back to 'git.cmd' when plain 'git' is not found
        (errno 2).  Warns for versions below 1.6.0 and aborts for versions
        too old to work at all.
        """
        try:
            self._gitexecutable = 'git'
            out, err = self._gitnodir(['--version'])
        except OSError, e:
            if e.errno != 2 or os.name != 'nt':
                raise
            self._gitexecutable = 'git.cmd'
            out, err = self._gitnodir(['--version'])
        versionstatus = self._checkversion(out)
        if versionstatus == 'unknown':
            self.ui.warn(_('cannot retrieve git version\n'))
        elif versionstatus == 'abort':
            raise util.Abort(_('git subrepo requires at least 1.6.0 or later'))
        elif versionstatus == 'warning':
            self.ui.warn(_('git subrepo requires at least 1.6.0 or later\n'))
1156 1156
1157 1157 @staticmethod
1158 1158 def _gitversion(out):
1159 1159 m = re.search(r'^git version (\d+)\.(\d+)\.(\d+)', out)
1160 1160 if m:
1161 1161 return (int(m.group(1)), int(m.group(2)), int(m.group(3)))
1162 1162
1163 1163 m = re.search(r'^git version (\d+)\.(\d+)', out)
1164 1164 if m:
1165 1165 return (int(m.group(1)), int(m.group(2)), 0)
1166 1166
1167 1167 return -1
1168 1168
    @staticmethod
    def _checkversion(out):
        '''ensure git version is new enough

        >>> _checkversion = gitsubrepo._checkversion
        >>> _checkversion('git version 1.6.0')
        'ok'
        >>> _checkversion('git version 1.8.5')
        'ok'
        >>> _checkversion('git version 1.4.0')
        'abort'
        >>> _checkversion('git version 1.5.0')
        'warning'
        >>> _checkversion('git version 1.9-rc0')
        'ok'
        >>> _checkversion('git version 1.9.0.265.g81cdec2')
        'ok'
        >>> _checkversion('git version 1.9.0.GIT')
        'ok'
        >>> _checkversion('git version 12345')
        'unknown'
        >>> _checkversion('no')
        'unknown'
        '''
        # maps the parsed version to 'ok', 'warning', 'abort' or 'unknown'
        version = gitsubrepo._gitversion(out)
        # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
        # despite the docstring comment.  For now, error on 1.4.0, warn on
        # 1.5.0 but attempt to continue.
        if version == -1:
            return 'unknown'
        if version < (1, 5, 0):
            return 'abort'
        elif version < (1, 6, 0):
            return 'warning'
        return 'ok'
1204 1204
1205 1205 def _gitcommand(self, commands, env=None, stream=False):
1206 1206 return self._gitdir(commands, env=env, stream=stream)[0]
1207 1207
1208 1208 def _gitdir(self, commands, env=None, stream=False):
1209 1209 return self._gitnodir(commands, env=env, stream=stream,
1210 1210 cwd=self._abspath)
1211 1211
    def _gitnodir(self, commands, env=None, stream=False, cwd=None):
        """Calls the git command

        The methods tries to call the git command. versions prior to 1.6.0
        are not supported and very probably fail.

        Returns (stdout, returncode), or (stdout pipe, None) when
        ``stream`` is set.  Exit codes other than 0/1 abort, except for
        commands known to use them legitimately.
        """
        self.ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
        # unless ui.quiet is set, print git's stderr,
        # which is mostly progress and useful info
        errpipe = None
        if self.ui.quiet:
            errpipe = open(os.devnull, 'w')
        p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1,
                             cwd=cwd, env=env, close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=errpipe)
        if stream:
            return p.stdout, None

        retdata = p.stdout.read().strip()
        # wait for the child to exit to avoid race condition.
        p.wait()

        if p.returncode != 0 and p.returncode != 1:
            # there are certain error codes that are ok
            command = commands[0]
            if command in ('cat-file', 'symbolic-ref'):
                return retdata, p.returncode
            # for all others, abort
            raise util.Abort('git %s error %d in %s' %
                             (command, p.returncode, self._relpath))

        return retdata, p.returncode
1244 1244
1245 1245 def _gitmissing(self):
1246 1246 return not os.path.exists(os.path.join(self._abspath, '.git'))
1247 1247
1248 1248 def _gitstate(self):
1249 1249 return self._gitcommand(['rev-parse', 'HEAD'])
1250 1250
1251 1251 def _gitcurrentbranch(self):
1252 1252 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
1253 1253 if err:
1254 1254 current = None
1255 1255 return current
1256 1256
1257 1257 def _gitremote(self, remote):
1258 1258 out = self._gitcommand(['remote', 'show', '-n', remote])
1259 1259 line = out.split('\n')[1]
1260 1260 i = line.index('URL: ') + len('URL: ')
1261 1261 return line[i:]
1262 1262
1263 1263 def _githavelocally(self, revision):
1264 1264 out, code = self._gitdir(['cat-file', '-e', revision])
1265 1265 return code == 0
1266 1266
1267 1267 def _gitisancestor(self, r1, r2):
1268 1268 base = self._gitcommand(['merge-base', r1, r2])
1269 1269 return base == r1
1270 1270
1271 1271 def _gitisbare(self):
1272 1272 return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
1273 1273
    def _gitupdatestat(self):
        """This must be run before git diff-index.
        diff-index only looks at changes to file stat;
        this command looks at file contents and updates the stat."""
        # --refresh re-checks file contents against the index
        self._gitcommand(['update-index', '-q', '--refresh'])
1279 1279
    def _gitbranchmap(self):
        '''returns 2 things:
        a map from git branch to revision
        a map from revision to branches'''
        branch2rev = {}
        rev2branch = {}

        # for-each-ref emits one '<sha> <refname>' line per ref
        out = self._gitcommand(['for-each-ref', '--format',
                                '%(objectname) %(refname)'])
        for line in out.split('\n'):
            revision, ref = line.split(' ')
            # only local heads and remote-tracking refs are of interest
            if (not ref.startswith('refs/heads/') and
                not ref.startswith('refs/remotes/')):
                continue
            if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
                continue # ignore remote/HEAD redirects
            branch2rev[ref] = revision
            rev2branch.setdefault(revision, []).append(ref)
        return branch2rev, rev2branch
1299 1299
    def _gittracking(self, branches):
        'return map of remote branch to local tracking branch'
        # assumes no more than one local tracking branch for each remote
        tracking = {}
        for b in branches:
            if b.startswith('refs/remotes/'):
                continue
            # strip 'refs/heads/' to get the short local branch name
            bname = b.split('/', 2)[2]
            remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
            if remote:
                ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
                tracking['refs/remotes/%s/%s' %
                         (remote, ref.split('/', 2)[2])] = b
        return tracking
1314 1314
    def _abssource(self, source):
        """Resolve ``source`` relative to the parent repository, unless it
        is already absolute (has a URL scheme or uses scp-like syntax)."""
        if '://' not in source:
            # recognize the scp syntax as an absolute source
            colon = source.find(':')
            if colon != -1 and '/' not in source[:colon]:
                return source
        self._subsource = source
        return _abssource(self)
1323 1323
1324 1324 def _fetch(self, source, revision):
1325 1325 if self._gitmissing():
1326 1326 source = self._abssource(source)
1327 1327 self.ui.status(_('cloning subrepo %s from %s\n') %
1328 1328 (self._relpath, source))
1329 1329 self._gitnodir(['clone', source, self._abspath])
1330 1330 if self._githavelocally(revision):
1331 1331 return
1332 1332 self.ui.status(_('pulling subrepo %s from %s\n') %
1333 1333 (self._relpath, self._gitremote('origin')))
1334 1334 # try only origin: the originally cloned repo
1335 1335 self._gitcommand(['fetch'])
1336 1336 if not self._githavelocally(revision):
1337 1337 raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
1338 1338 (revision, self._relpath))
1339 1339
    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False):
        """Report whether the subrepo differs from its recorded state."""
        if self._gitmissing():
            # a missing checkout only counts as dirty if a state is recorded
            return self._state[1] != ''
        if self._gitisbare():
            return True
        if not ignoreupdate and self._state[1] != self._gitstate():
            # different version checked out
            return True
        # check for staged changes or modified files; ignore untracked files
        self._gitupdatestat()
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
        return code == 1
1353 1353
1354 1354 def basestate(self):
1355 1355 return self._gitstate()
1356 1356
    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        """Update the git checkout to the revision in ``state``.

        Clones/fetches as needed, un-bares a bare repo, and tries to end
        up on an appropriate branch (master, a local branch, or a tracking
        branch for a remote one) rather than a detached HEAD.
        """
        source, revision, kind = state
        if not revision:
            self.remove()
            return
        self._fetch(source, revision)
        # if the repo was set to be bare, unbare it
        if self._gitisbare():
            self._gitcommand(['config', 'core.bare', 'false'])
            if self._gitstate() == revision:
                self._gitcommand(['reset', '--hard', 'HEAD'])
                return
        elif self._gitstate() == revision:
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # reset --hard will otherwise throw away files added for commit,
                # not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
            self._gitcommand(['reset', '--hard', 'HEAD'])
            return
        branch2rev, rev2branch = self._gitbranchmap()

        def checkout(args):
            # run 'git checkout' (with -f under overwrite) and clean up
            cmd = ['checkout']
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # the -f option will otherwise throw away files added for
                # commit, not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
                cmd.append('-f')
            self._gitcommand(cmd + args)
            _sanitize(self.ui, self._abspath, '.git')

        def rawcheckout():
            # no branch to checkout, check it out with no branch
            self.ui.warn(_('checking out detached HEAD in subrepo %s\n') %
                         self._relpath)
            self.ui.warn(_('check out a git branch if you intend '
                           'to make changes\n'))
            checkout(['-q', revision])

        if revision not in rev2branch:
            rawcheckout()
            return
        branches = rev2branch[revision]
        firstlocalbranch = None
        for b in branches:
            if b == 'refs/heads/master':
                # master trumps all other branches
                checkout(['refs/heads/master'])
                return
            if not firstlocalbranch and not b.startswith('refs/remotes/'):
                firstlocalbranch = b
        if firstlocalbranch:
            checkout([firstlocalbranch])
            return

        tracking = self._gittracking(branch2rev.keys())
        # choose a remote branch already tracked if possible
        remote = branches[0]
        if remote not in tracking:
            for b in branches:
                if b in tracking:
                    remote = b
                    break

        if remote not in tracking:
            # create a new local tracking branch
            local = remote.split('/', 3)[3]
            checkout(['-b', local, remote])
        elif self._gitisancestor(branch2rev[tracking[remote]], remote):
            # When updating to a tracked remote branch,
            # if the local tracking branch is downstream of it,
            # a normal `git pull` would have performed a "fast-forward merge"
            # which is equivalent to updating the local branch to the remote.
            # Since we are only looking at branching at update, we need to
            # detect this situation and perform this action lazily.
            if tracking[remote] != self._gitcurrentbranch():
                checkout([tracking[remote]])
            self._gitcommand(['merge', '--ff', remote])
            _sanitize(self.ui, self._abspath, '.git')
        else:
            # a real merge would be required, just checkout the revision
            rawcheckout()
1442 1442
    @annotatesubrepoerror
    def commit(self, text, user, date):
        """Commit all changes in the subrepo; return the new HEAD hash."""
        if self._gitmissing():
            raise util.Abort(_("subrepo %s is missing") % self._relpath)
        cmd = ['commit', '-a', '-m', text]
        env = os.environ.copy()
        if user:
            cmd += ['--author', user]
        if date:
            # git's date parser silently ignores when seconds < 1e9
            # convert to ISO8601
            env['GIT_AUTHOR_DATE'] = util.datestr(date,
                                                  '%Y-%m-%dT%H:%M:%S %1%2')
        self._gitcommand(cmd, env=env)
        # make sure commit works otherwise HEAD might not exist under certain
        # circumstances
        return self._gitstate()
1460 1460
    @annotatesubrepoerror
    def merge(self, state):
        """Merge the revision in ``state`` into the current checkout.

        Fast-forwards via get() when possible; a dirty working tree
        triggers a prompt first.
        """
        source, revision, kind = state
        self._fetch(source, revision)
        base = self._gitcommand(['merge-base', revision, self._state[1]])
        self._gitupdatestat()
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])

        def mergefunc():
            if base == revision:
                self.get(state) # fast forward merge
            elif base != self._state[1]:
                self._gitcommand(['merge', '--no-commit', revision])
                _sanitize(self.ui, self._abspath, '.git')

        if self.dirty():
            if self._gitstate() != revision:
                dirty = self._gitstate() == self._state[1] or code != 0
                if _updateprompt(self.ui, self, dirty,
                                 self._state[1][:7], revision[:7]):
                    mergefunc()
        else:
            mergefunc()
1484 1484
1485 1485 @annotatesubrepoerror
1486 1486 def push(self, opts):
1487 1487 force = opts.get('force')
1488 1488
1489 1489 if not self._state[1]:
1490 1490 return True
1491 1491 if self._gitmissing():
1492 1492 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1493 1493 # if a branch in origin contains the revision, nothing to do
1494 1494 branch2rev, rev2branch = self._gitbranchmap()
1495 1495 if self._state[1] in rev2branch:
1496 1496 for b in rev2branch[self._state[1]]:
1497 1497 if b.startswith('refs/remotes/origin/'):
1498 1498 return True
1499 1499 for b, revision in branch2rev.iteritems():
1500 1500 if b.startswith('refs/remotes/origin/'):
1501 1501 if self._gitisancestor(self._state[1], revision):
1502 1502 return True
1503 1503 # otherwise, try to push the currently checked out branch
1504 1504 cmd = ['push']
1505 1505 if force:
1506 1506 cmd.append('--force')
1507 1507
1508 1508 current = self._gitcurrentbranch()
1509 1509 if current:
1510 1510 # determine if the current branch is even useful
1511 1511 if not self._gitisancestor(self._state[1], current):
1512 1512 self.ui.warn(_('unrelated git branch checked out '
1513 1513 'in subrepo %s\n') % self._relpath)
1514 1514 return False
1515 1515 self.ui.status(_('pushing branch %s of subrepo %s\n') %
1516 1516 (current.split('/', 2)[2], self._relpath))
1517 1517 ret = self._gitdir(cmd + ['origin', current])
1518 1518 return ret[1] == 0
1519 1519 else:
1520 1520 self.ui.warn(_('no branch checked out in subrepo %s\n'
1521 1521 'cannot push revision %s\n') %
1522 1522 (self._relpath, self._state[1]))
1523 1523 return False
1524 1524
1525 1525 @annotatesubrepoerror
1526 1526 def remove(self):
1527 1527 if self._gitmissing():
1528 1528 return
1529 1529 if self.dirty():
1530 1530 self.ui.warn(_('not removing repo %s because '
1531 1531 'it has changes.\n') % self._relpath)
1532 1532 return
1533 1533 # we can't fully delete the repository as it may contain
1534 1534 # local-only history
1535 1535 self.ui.note(_('removing subrepo %s\n') % self._relpath)
1536 1536 self._gitcommand(['config', 'core.bare', 'true'])
1537 1537 for f in os.listdir(self._abspath):
1538 1538 if f == '.git':
1539 1539 continue
1540 1540 path = os.path.join(self._abspath, f)
1541 1541 if os.path.isdir(path) and not os.path.islink(path):
1542 1542 shutil.rmtree(path)
1543 1543 else:
1544 1544 os.remove(path)
1545 1545
1546 1546 def archive(self, archiver, prefix, match=None):
1547 1547 total = 0
1548 1548 source, revision = self._state
1549 1549 if not revision:
1550 1550 return total
1551 1551 self._fetch(source, revision)
1552 1552
1553 1553 # Parse git's native archive command.
1554 1554 # This should be much faster than manually traversing the trees
1555 1555 # and objects with many subprocess calls.
1556 1556 tarstream = self._gitcommand(['archive', revision], stream=True)
1557 1557 tar = tarfile.open(fileobj=tarstream, mode='r|')
1558 1558 relpath = subrelpath(self)
1559 1559 self.ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
1560 1560 for i, info in enumerate(tar):
1561 1561 if info.isdir():
1562 1562 continue
1563 1563 if match and not match(info.name):
1564 1564 continue
1565 1565 if info.issym():
1566 1566 data = info.linkname
1567 1567 else:
1568 1568 data = tar.extractfile(info).read()
1569 1569 archiver.addfile(os.path.join(prefix, self._path, info.name),
1570 1570 info.mode, info.issym(), data)
1571 1571 total += 1
1572 1572 self.ui.progress(_('archiving (%s)') % relpath, i + 1,
1573 1573 unit=_('files'))
1574 1574 self.ui.progress(_('archiving (%s)') % relpath, None)
1575 1575 return total
1576 1576
1577 1577
1578 1578 @annotatesubrepoerror
1579 1579 def status(self, rev2, **opts):
1580 1580 rev1 = self._state[1]
1581 1581 if self._gitmissing() or not rev1:
1582 1582 # if the repo is missing, return no results
1583 1583 return [], [], [], [], [], [], []
1584 1584 modified, added, removed = [], [], []
1585 1585 self._gitupdatestat()
1586 1586 if rev2:
1587 1587 command = ['diff-tree', rev1, rev2]
1588 1588 else:
1589 1589 command = ['diff-index', rev1]
1590 1590 out = self._gitcommand(command)
1591 1591 for line in out.split('\n'):
1592 1592 tab = line.find('\t')
1593 1593 if tab == -1:
1594 1594 continue
1595 1595 status, f = line[tab - 1], line[tab + 1:]
1596 1596 if status == 'M':
1597 1597 modified.append(f)
1598 1598 elif status == 'A':
1599 1599 added.append(f)
1600 1600 elif status == 'D':
1601 1601 removed.append(f)
1602 1602
1603 1603 deleted, unknown, ignored, clean = [], [], [], []
1604 1604
1605 1605 if not rev2:
1606 1606 command = ['ls-files', '--others', '--exclude-standard']
1607 1607 out = self._gitcommand(command)
1608 1608 for line in out.split('\n'):
1609 1609 if len(line) == 0:
1610 1610 continue
1611 1611 unknown.append(line)
1612 1612
1613 1613 return scmutil.status(modified, added, removed, deleted,
1614 1614 unknown, ignored, clean)
1615 1615
1616 1616 @annotatesubrepoerror
1617 1617 def diff(self, ui, diffopts, node2, match, prefix, **opts):
1618 1618 node1 = self._state[1]
1619 1619 cmd = ['diff']
1620 1620 if opts['stat']:
1621 1621 cmd.append('--stat')
1622 1622 else:
1623 1623 # for Git, this also implies '-p'
1624 1624 cmd.append('-U%d' % diffopts.context)
1625 1625
1626 1626 gitprefix = os.path.join(prefix, self._path)
1627 1627
1628 1628 if diffopts.noprefix:
1629 1629 cmd.extend(['--src-prefix=%s/' % gitprefix,
1630 1630 '--dst-prefix=%s/' % gitprefix])
1631 1631 else:
1632 1632 cmd.extend(['--src-prefix=a/%s/' % gitprefix,
1633 1633 '--dst-prefix=b/%s/' % gitprefix])
1634 1634
1635 1635 if diffopts.ignorews:
1636 1636 cmd.append('--ignore-all-space')
1637 1637 if diffopts.ignorewsamount:
1638 1638 cmd.append('--ignore-space-change')
1639 1639 if self._gitversion(self._gitcommand(['--version'])) >= (1, 8, 4) \
1640 1640 and diffopts.ignoreblanklines:
1641 1641 cmd.append('--ignore-blank-lines')
1642 1642
1643 1643 cmd.append(node1)
1644 1644 if node2:
1645 1645 cmd.append(node2)
1646 1646
1647 1647 if match.anypats():
1648 1648 return #No support for include/exclude yet
1649 1649
1650 1650 if match.always():
1651 1651 ui.write(self._gitcommand(cmd))
1652 1652 elif match.files():
1653 1653 for f in match.files():
1654 1654 ui.write(self._gitcommand(cmd + [f]))
1655 1655 elif match(gitprefix): #Subrepo is matched
1656 1656 ui.write(self._gitcommand(cmd))
1657 1657
1658 1658 def revert(self, substate, *pats, **opts):
1659 1659 self.ui.status(_('reverting subrepo %s\n') % substate[0])
1660 1660 if not opts.get('no_backup'):
1661 1661 self.ui.warn('%s: reverting %s subrepos without '
1662 1662 '--no-backup is unsupported\n'
1663 1663 % (substate[0], substate[2]))
1664 1664 return []
1665 1665
1666 1666 self.get(substate, overwrite=True)
1667 1667 return []
1668 1668
1669 1669 def shortid(self, revid):
1670 1670 return revid[:7]
1671 1671
# Dispatch table mapping a subrepo kind string to the class implementing
# it; hgsubrepo/svnsubrepo/gitsubrepo are defined earlier in this file.
types = {
    'hg': hgsubrepo,
    'svn': svnsubrepo,
    'git': gitsubrepo,
    }
General Comments 0
You need to be logged in to leave comments. Login now