peer: remove cancopy from peer api; use directly on repo instead
Sune Foldager
r17194:32a6a33b default
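The change itself is small: `localpeer.cancopy` (which only delegated to the underlying repo, with a note that bundlerepo can override it) is dropped from the peer API, and `clone()` in hg.py now guards on having a local repo and calls `cancopy()` on it directly. A minimal sketch of the resulting caller-side pattern follows; the helper `can_hardlink_clone` and its driver code are hypothetical illustration, not part of this changeset:

```python
from mercurial import ui as uimod, hg

def can_hardlink_clone(source):
    """Hypothetical helper: could a clone of `source` copy the store
    directly (the hardlink fast path) instead of pulling?"""
    srcpeer = hg.peer(uimod.ui(), {}, source)
    srcrepo = srcpeer.local()  # None when the peer is not backed by a local repo
    # before this changeset: srcpeer.cancopy()
    # after this changeset:  guard on srcrepo and ask the repo directly
    return bool(srcrepo and srcrepo.cancopy())
```

Remote peers return None from `local()`, so the store-copy fast path is skipped for them exactly as before; the override hook for bundlerepo lives on the repo class, so nothing is lost by removing the peer-level indirection.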
--- a/mercurial/hg.py
+++ b/mercurial/hg.py
@@ -1,597 +1,598 @@
 # hg.py - repository classes for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from i18n import _
 from lock import release
 from node import hex, nullid
 import localrepo, bundlerepo, httppeer, sshpeer, statichttprepo, bookmarks
 import lock, util, extensions, error, node, scmutil
 import cmdutil, discovery
 import merge as mergemod
 import verify as verifymod
 import errno, os, shutil
 
 def _local(path):
     path = util.expandpath(util.urllocalpath(path))
     return (os.path.isfile(path) and bundlerepo or localrepo)
 
 def addbranchrevs(lrepo, other, branches, revs):
     peer = other.peer() # a courtesy to callers using a localrepo for other
     hashbranch, branches = branches
     if not hashbranch and not branches:
         return revs or None, revs and revs[0] or None
     revs = revs and list(revs) or []
     if not peer.capable('branchmap'):
         if branches:
             raise util.Abort(_("remote branch lookup not supported"))
         revs.append(hashbranch)
         return revs, revs[0]
     branchmap = peer.branchmap()
 
     def primary(branch):
         if branch == '.':
             if not lrepo:
                 raise util.Abort(_("dirstate branch not accessible"))
             branch = lrepo.dirstate.branch()
         if branch in branchmap:
             revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
             return True
         else:
             return False
 
     for branch in branches:
         if not primary(branch):
             raise error.RepoLookupError(_("unknown branch '%s'") % branch)
     if hashbranch:
         if not primary(hashbranch):
             revs.append(hashbranch)
     return revs, revs[0]
 
 def parseurl(path, branches=None):
     '''parse url#branch, returning (url, (branch, branches))'''
 
     u = util.url(path)
     branch = None
     if u.fragment:
         branch = u.fragment
         u.fragment = None
     return str(u), (branch, branches or [])
 
 schemes = {
     'bundle': bundlerepo,
     'file': _local,
     'http': httppeer,
     'https': httppeer,
     'ssh': sshpeer,
     'static-http': statichttprepo,
 }
 
 def _peerlookup(path):
     u = util.url(path)
     scheme = u.scheme or 'file'
     thing = schemes.get(scheme) or schemes['file']
     try:
         return thing(path)
     except TypeError:
         return thing
 
 def islocal(repo):
     '''return true if repo or path is local'''
     if isinstance(repo, str):
         try:
             return _peerlookup(repo).islocal(repo)
         except AttributeError:
             return False
     return repo.local()
 
 def _peerorrepo(ui, path, create=False):
     """return a repository object for the specified path"""
     obj = _peerlookup(path).instance(ui, path, create)
     ui = getattr(obj, "ui", ui)
     for name, module in extensions.extensions():
         hook = getattr(module, 'reposetup', None)
         if hook:
             hook(ui, obj)
     return obj
 
 def repository(ui, path='', create=False):
     """return a repository object for the specified path"""
     peer = _peerorrepo(ui, path, create)
     repo = peer.local()
     if not repo:
         raise util.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
     return repo
 
 def peer(uiorrepo, opts, path, create=False):
     '''return a repository peer for the specified path'''
     rui = remoteui(uiorrepo, opts)
     return _peerorrepo(rui, path, create).peer()
 
 def defaultdest(source):
     '''return default destination of clone if none is given'''
     return os.path.basename(os.path.normpath(source))
 
 def share(ui, source, dest=None, update=True):
     '''create a shared repository'''
 
     if not islocal(source):
         raise util.Abort(_('can only share local repositories'))
 
     if not dest:
         dest = defaultdest(source)
     else:
         dest = ui.expandpath(dest)
 
     if isinstance(source, str):
         origsource = ui.expandpath(source)
         source, branches = parseurl(origsource)
         srcrepo = repository(ui, source)
         rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
     else:
         srcrepo = source.local()
         origsource = source = srcrepo.url()
         checkout = None
 
     sharedpath = srcrepo.sharedpath # if our source is already sharing
 
     root = os.path.realpath(dest)
     roothg = os.path.join(root, '.hg')
 
     if os.path.exists(roothg):
         raise util.Abort(_('destination already exists'))
 
     if not os.path.isdir(root):
         os.mkdir(root)
     util.makedir(roothg, notindexed=True)
 
     requirements = ''
     try:
         requirements = srcrepo.opener.read('requires')
     except IOError, inst:
         if inst.errno != errno.ENOENT:
             raise
 
     requirements += 'shared\n'
     util.writefile(os.path.join(roothg, 'requires'), requirements)
     util.writefile(os.path.join(roothg, 'sharedpath'), sharedpath)
 
     r = repository(ui, root)
 
     default = srcrepo.ui.config('paths', 'default')
     if default:
         fp = r.opener("hgrc", "w", text=True)
         fp.write("[paths]\n")
         fp.write("default = %s\n" % default)
         fp.close()
 
     if update:
         r.ui.status(_("updating working directory\n"))
         if update is not True:
             checkout = update
         for test in (checkout, 'default', 'tip'):
             if test is None:
                 continue
             try:
                 uprev = r.lookup(test)
                 break
             except error.RepoLookupError:
                 continue
         _update(r, uprev)
 
 def copystore(ui, srcrepo, destpath):
     '''copy files from store of srcrepo in destpath
 
     returns destlock
     '''
     destlock = None
     try:
         hardlink = None
         num = 0
         srcpublishing = srcrepo.ui.configbool('phases', 'publish', True)
         for f in srcrepo.store.copylist():
             if srcpublishing and f.endswith('phaseroots'):
                 continue
             src = os.path.join(srcrepo.sharedpath, f)
             dst = os.path.join(destpath, f)
             dstbase = os.path.dirname(dst)
             if dstbase and not os.path.exists(dstbase):
                 os.mkdir(dstbase)
             if os.path.exists(src):
                 if dst.endswith('data'):
                     # lock to avoid premature writing to the target
                     destlock = lock.lock(os.path.join(dstbase, "lock"))
                 hardlink, n = util.copyfiles(src, dst, hardlink)
                 num += n
         if hardlink:
             ui.debug("linked %d files\n" % num)
         else:
             ui.debug("copied %d files\n" % num)
         return destlock
     except: # re-raises
         release(destlock)
         raise
 
 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
           update=True, stream=False, branch=None):
     """Make a copy of an existing repository.
 
     Create a copy of an existing repository in a new directory. The
     source and destination are URLs, as passed to the repository
     function. Returns a pair of repository peers, the source and
     newly created destination.
 
     The location of the source is added to the new repository's
     .hg/hgrc file, as the default to be used for future pulls and
     pushes.
 
     If an exception is raised, the partly cloned/updated destination
     repository will be deleted.
 
     Arguments:
 
     source: repository object or URL
 
     dest: URL of destination repository to create (defaults to base
     name of source repository)
 
     pull: always pull from source repository, even in local case
 
     stream: stream raw data uncompressed from repository (fast over
     LAN, slow over WAN)
 
     rev: revision to clone up to (implies pull=True)
 
     update: update working directory after clone completes, if
     destination is local repository (True means update to default rev,
     anything else is treated as a revision)
 
     branch: branches to clone
     """
 
     if isinstance(source, str):
         origsource = ui.expandpath(source)
         source, branch = parseurl(origsource, branch)
         srcpeer = peer(ui, peeropts, source)
     else:
         srcpeer = source.peer() # in case we were called with a localrepo
         branch = (None, branch or [])
         origsource = source = srcpeer.url()
     rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
 
     if dest is None:
         dest = defaultdest(source)
         ui.status(_("destination directory: %s\n") % dest)
     else:
         dest = ui.expandpath(dest)
 
     dest = util.urllocalpath(dest)
     source = util.urllocalpath(source)
 
     if not dest:
         raise util.Abort(_("empty destination path is not valid"))
     if os.path.exists(dest):
         if not os.path.isdir(dest):
             raise util.Abort(_("destination '%s' already exists") % dest)
         elif os.listdir(dest):
             raise util.Abort(_("destination '%s' is not empty") % dest)
 
     class DirCleanup(object):
         def __init__(self, dir_):
             self.rmtree = shutil.rmtree
             self.dir_ = dir_
         def close(self):
             self.dir_ = None
         def cleanup(self):
             if self.dir_:
                 self.rmtree(self.dir_, True)
 
     srclock = destlock = dircleanup = None
     srcrepo = srcpeer.local()
     try:
         abspath = origsource
         if islocal(origsource):
             abspath = os.path.abspath(util.urllocalpath(origsource))
 
         if islocal(dest):
             dircleanup = DirCleanup(dest)
 
         copy = False
-        if srcpeer.cancopy() and islocal(dest) and not srcrepo.revs("secret()"):
+        if (srcrepo and srcrepo.cancopy() and islocal(dest)
+            and not srcrepo.revs("secret()")):
             copy = not pull and not rev
 
         if copy:
             try:
                 # we use a lock here because if we race with commit, we
                 # can end up with extra data in the cloned revlogs that's
                 # not pointed to by changesets, thus causing verify to
                 # fail
                 srclock = srcrepo.lock(wait=False)
             except error.LockError:
                 copy = False
 
         if copy:
             srcrepo.hook('preoutgoing', throw=True, source='clone')
             hgdir = os.path.realpath(os.path.join(dest, ".hg"))
             if not os.path.exists(dest):
                 os.mkdir(dest)
             else:
                 # only clean up directories we create ourselves
                 dircleanup.dir_ = hgdir
             try:
                 destpath = hgdir
                 util.makedir(destpath, notindexed=True)
             except OSError, inst:
                 if inst.errno == errno.EEXIST:
                     dircleanup.close()
                     raise util.Abort(_("destination '%s' already exists")
                                      % dest)
                 raise
 
             destlock = copystore(ui, srcrepo, destpath)
 
             # we need to re-init the repo after manually copying the data
             # into it
             destpeer = peer(ui, peeropts, dest)
             srcrepo.hook('outgoing', source='clone',
                          node=node.hex(node.nullid))
         else:
             try:
                 destpeer = peer(ui, peeropts, dest, create=True)
             except OSError, inst:
                 if inst.errno == errno.EEXIST:
                     dircleanup.close()
                     raise util.Abort(_("destination '%s' already exists")
                                      % dest)
                 raise
 
             revs = None
             if rev:
                 if not srcpeer.capable('lookup'):
                     raise util.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))
                 revs = [srcpeer.lookup(r) for r in rev]
                 checkout = revs[0]
             if destpeer.local():
                 destpeer.local().clone(srcpeer, heads=revs, stream=stream)
             elif srcrepo:
                 srcrepo.push(destpeer, revs=revs)
             else:
                 raise util.Abort(_("clone from remote to remote not supported"))
 
         if dircleanup:
             dircleanup.close()
 
         # clone all bookmarks except divergent ones
         destrepo = destpeer.local()
         if destrepo and srcpeer.capable("pushkey"):
             rb = srcpeer.listkeys('bookmarks')
             for k, n in rb.iteritems():
                 try:
                     m = destrepo.lookup(n)
                     destrepo._bookmarks[k] = m
                 except error.RepoLookupError:
                     pass
             if rb:
                 bookmarks.write(destrepo)
         elif srcrepo and destpeer.capable("pushkey"):
             for k, n in srcrepo._bookmarks.iteritems():
                 destpeer.pushkey('bookmarks', k, '', hex(n))
 
         if destrepo:
             fp = destrepo.opener("hgrc", "w", text=True)
             fp.write("[paths]\n")
             u = util.url(abspath)
             u.passwd = None
             defaulturl = str(u)
             fp.write("default = %s\n" % defaulturl)
             fp.close()
 
             destrepo.ui.setconfig('paths', 'default', defaulturl)
 
             if update:
                 if update is not True:
                     checkout = srcrepo.lookup(update)
                 for test in (checkout, 'default', 'tip'):
                     if test is None:
                         continue
                     try:
                         uprev = destrepo.lookup(test)
                         break
                     except error.RepoLookupError:
                         continue
                 bn = destrepo[uprev].branch()
                 destrepo.ui.status(_("updating to branch %s\n") % bn)
                 _update(destrepo, uprev)
 
         return srcpeer, destpeer
     finally:
         release(srclock, destlock)
         if dircleanup is not None:
             dircleanup.cleanup()
         if srcpeer is not None:
             srcpeer.close()
 
 def _showstats(repo, stats):
     repo.ui.status(_("%d files updated, %d files merged, "
                      "%d files removed, %d files unresolved\n") % stats)
 
 def update(repo, node):
     """update the working directory to node, merging linear changes"""
     stats = mergemod.update(repo, node, False, False, None)
     _showstats(repo, stats)
     if stats[3]:
         repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
     return stats[3] > 0
 
 # naming conflict in clone()
 _update = update
 
 def clean(repo, node, show_stats=True):
     """forcibly switch the working directory to node, clobbering changes"""
     stats = mergemod.update(repo, node, False, True, None)
     if show_stats:
         _showstats(repo, stats)
     return stats[3] > 0
 
 def merge(repo, node, force=None, remind=True):
     """Branch merge with node, resolving changes. Return true if any
     unresolved conflicts."""
     stats = mergemod.update(repo, node, True, force, False)
     _showstats(repo, stats)
     if stats[3]:
         repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                          "or 'hg update -C .' to abandon\n"))
     elif remind:
         repo.ui.status(_("(branch merge, don't forget to commit)\n"))
     return stats[3] > 0
 
 def _incoming(displaychlist, subreporecurse, ui, repo, source,
         opts, buffered=False):
     """
     Helper for incoming / gincoming.
     displaychlist gets called with
         (remoterepo, incomingchangesetlist, displayer) parameters,
     and is supposed to contain only code that can't be unified.
     """
     source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
     other = peer(repo, opts, source)
     ui.status(_('comparing with %s\n') % util.hidepassword(source))
     revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
 
     if revs:
         revs = [other.lookup(rev) for rev in revs]
     other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                 revs, opts["bundle"], opts["force"])
     try:
         if not chlist:
             ui.status(_("no changes found\n"))
             return subreporecurse()
 
         displayer = cmdutil.show_changeset(ui, other, opts, buffered)
 
         # XXX once graphlog extension makes it into core,
         # should be replaced by a if graph/else
         displaychlist(other, chlist, displayer)
 
         displayer.close()
     finally:
         cleanupfn()
     subreporecurse()
     return 0 # exit code is zero since we found incoming changes
 
 def incoming(ui, repo, source, opts):
     def subreporecurse():
         ret = 1
         if opts.get('subrepos'):
             ctx = repo[None]
             for subpath in sorted(ctx.substate):
                 sub = ctx.sub(subpath)
                 ret = min(ret, sub.incoming(ui, source, opts))
         return ret
 
     def display(other, chlist, displayer):
         limit = cmdutil.loglimit(opts)
         if opts.get('newest_first'):
             chlist.reverse()
         count = 0
         for n in chlist:
             if limit is not None and count >= limit:
                 break
             parents = [p for p in other.changelog.parents(n) if p != nullid]
             if opts.get('no_merges') and len(parents) == 2:
                 continue
             count += 1
             displayer.show(other[n])
     return _incoming(display, subreporecurse, ui, repo, source, opts)
 
 def _outgoing(ui, repo, dest, opts):
     dest = ui.expandpath(dest or 'default-push', dest or 'default')
     dest, branches = parseurl(dest, opts.get('branch'))
     ui.status(_('comparing with %s\n') % util.hidepassword(dest))
     revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
     if revs:
         revs = [repo.lookup(rev) for rev in revs]
 
     other = peer(repo, opts, dest)
     outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                             force=opts.get('force'))
     o = outgoing.missing
     if not o:
         scmutil.nochangesfound(repo.ui, outgoing.excluded)
         return None
     return o
 
 def outgoing(ui, repo, dest, opts):
     def recurse():
         ret = 1
         if opts.get('subrepos'):
             ctx = repo[None]
             for subpath in sorted(ctx.substate):
                 sub = ctx.sub(subpath)
                 ret = min(ret, sub.outgoing(ui, dest, opts))
         return ret
 
     limit = cmdutil.loglimit(opts)
     o = _outgoing(ui, repo, dest, opts)
     if o is None:
         return recurse()
 
     if opts.get('newest_first'):
         o.reverse()
     displayer = cmdutil.show_changeset(ui, repo, opts)
     count = 0
     for n in o:
         if limit is not None and count >= limit:
             break
         parents = [p for p in repo.changelog.parents(n) if p != nullid]
         if opts.get('no_merges') and len(parents) == 2:
             continue
         count += 1
         displayer.show(repo[n])
     displayer.close()
     recurse()
     return 0 # exit code is zero since we found outgoing changes
 
 def revert(repo, node, choose):
     """revert changes to revision in node without updating dirstate"""
     return mergemod.update(repo, node, False, True, choose)[3] > 0
 
 def verify(repo):
     """verify the consistency of a repository"""
     return verifymod.verify(repo)
 
 def remoteui(src, opts):
     'build a remote ui from ui or repo and opts'
     if util.safehasattr(src, 'baseui'): # looks like a repository
         dst = src.baseui.copy() # drop repo-specific config
         src = src.ui # copy target options from repo
     else: # assume it's a global ui object
         dst = src.copy() # keep all global options
 
     # copy ssh-specific options
     for o in 'ssh', 'remotecmd':
         v = opts.get(o) or src.config('ui', o)
         if v:
             dst.setconfig("ui", o, v)
 
     # copy bundle-specific options
     r = src.config('bundle', 'mainreporoot')
     if r:
         dst.setconfig('bundle', 'mainreporoot', r)
 
     # copy selected local settings to the remote ui
     for sect in ('auth', 'hostfingerprints', 'http_proxy'):
         for key, val in src.configitems(sect):
             dst.setconfig(sect, key, val)
     v = src.config('web', 'cacerts')
     if v:
         dst.setconfig('web', 'cacerts', util.expandpath(v))
 
     return dst
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,2571 +1,2568 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import bin, hex, nullid, nullrev, short
7 from node import bin, hex, nullid, nullrev, short
8 from i18n import _
8 from i18n import _
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import lock, transaction, store, encoding, base85
11 import lock, transaction, store, encoding, base85
12 import scmutil, util, extensions, hook, error, revset
12 import scmutil, util, extensions, hook, error, revset
13 import match as matchmod
13 import match as matchmod
14 import merge as mergemod
14 import merge as mergemod
15 import tags as tagsmod
15 import tags as tagsmod
16 from lock import release
16 from lock import release
17 import weakref, errno, os, time, inspect
17 import weakref, errno, os, time, inspect
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19 filecache = scmutil.filecache
19 filecache = scmutil.filecache
20
20
21 class storecache(filecache):
21 class storecache(filecache):
22 """filecache for files in the store"""
22 """filecache for files in the store"""
23 def join(self, obj, fname):
23 def join(self, obj, fname):
24 return obj.sjoin(fname)
24 return obj.sjoin(fname)
25
25
26 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
26 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
27 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
27 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
28
28
29 class localpeer(peer.peerrepository):
29 class localpeer(peer.peerrepository):
30 '''peer for a local repo; reflects only the most recent API'''
30 '''peer for a local repo; reflects only the most recent API'''
31
31
32 def __init__(self, repo, caps=MODERNCAPS):
32 def __init__(self, repo, caps=MODERNCAPS):
33 peer.peerrepository.__init__(self)
33 peer.peerrepository.__init__(self)
34 self._repo = repo
34 self._repo = repo
35 self.ui = repo.ui
35 self.ui = repo.ui
36 self._caps = repo._restrictcapabilities(caps)
36 self._caps = repo._restrictcapabilities(caps)
37 self.requirements = repo.requirements
37 self.requirements = repo.requirements
38 self.supportedformats = repo.supportedformats
38 self.supportedformats = repo.supportedformats
39
39
40 def close(self):
40 def close(self):
41 self._repo.close()
41 self._repo.close()
42
42
43 def _capabilities(self):
43 def _capabilities(self):
44 return self._caps
44 return self._caps
45
45
46 def local(self):
46 def local(self):
47 return self._repo
47 return self._repo
48
48
49 def cancopy(self):
50 return self._repo.cancopy() # so bundlerepo can override
51
52 def canpush(self):
49 def canpush(self):
53 return True
50 return True
54
51
55 def url(self):
52 def url(self):
56 return self._repo.url()
53 return self._repo.url()
57
54
58 def lookup(self, key):
55 def lookup(self, key):
59 return self._repo.lookup(key)
56 return self._repo.lookup(key)
60
57
61 def branchmap(self):
58 def branchmap(self):
62 return self._repo.branchmap()
59 return self._repo.branchmap()
63
60
64 def heads(self):
61 def heads(self):
65 return self._repo.heads()
62 return self._repo.heads()
66
63
67 def known(self, nodes):
64 def known(self, nodes):
68 return self._repo.known(nodes)
65 return self._repo.known(nodes)
69
66
70 def getbundle(self, source, heads=None, common=None):
67 def getbundle(self, source, heads=None, common=None):
71 return self._repo.getbundle(source, heads=heads, common=common)
68 return self._repo.getbundle(source, heads=heads, common=common)
72
69
73 # TODO We might want to move the next two calls into legacypeer and add
70 # TODO We might want to move the next two calls into legacypeer and add
74 # unbundle instead.
71 # unbundle instead.
75
72
76 def lock(self):
73 def lock(self):
77 return self._repo.lock()
74 return self._repo.lock()
78
75
79 def addchangegroup(self, cg, source, url):
76 def addchangegroup(self, cg, source, url):
80 return self._repo.addchangegroup(cg, source, url)
77 return self._repo.addchangegroup(cg, source, url)
81
78
82 def pushkey(self, namespace, key, old, new):
79 def pushkey(self, namespace, key, old, new):
83 return self._repo.pushkey(namespace, key, old, new)
80 return self._repo.pushkey(namespace, key, old, new)
84
81
85 def listkeys(self, namespace):
82 def listkeys(self, namespace):
86 return self._repo.listkeys(namespace)
83 return self._repo.listkeys(namespace)
87
84
88 def debugwireargs(self, one, two, three=None, four=None, five=None):
85 def debugwireargs(self, one, two, three=None, four=None, five=None):
89 '''used to test argument passing over the wire'''
86 '''used to test argument passing over the wire'''
90 return "%s %s %s %s %s" % (one, two, three, four, five)
87 return "%s %s %s %s %s" % (one, two, three, four, five)
91
88
92 class locallegacypeer(localpeer):
89 class locallegacypeer(localpeer):
93 '''peer extension which implements legacy methods too; used for tests with
90 '''peer extension which implements legacy methods too; used for tests with
94 restricted capabilities'''
91 restricted capabilities'''
95
92
96 def __init__(self, repo):
93 def __init__(self, repo):
97 localpeer.__init__(self, repo, caps=LEGACYCAPS)
94 localpeer.__init__(self, repo, caps=LEGACYCAPS)
98
95
99 def branches(self, nodes):
96 def branches(self, nodes):
100 return self._repo.branches(nodes)
97 return self._repo.branches(nodes)
101
98
102 def between(self, pairs):
99 def between(self, pairs):
103 return self._repo.between(pairs)
100 return self._repo.between(pairs)
104
101
105 def changegroup(self, basenodes, source):
102 def changegroup(self, basenodes, source):
106 return self._repo.changegroup(basenodes, source)
103 return self._repo.changegroup(basenodes, source)
107
104
108 def changegroupsubset(self, bases, heads, source):
105 def changegroupsubset(self, bases, heads, source):
109 return self._repo.changegroupsubset(bases, heads, source)
106 return self._repo.changegroupsubset(bases, heads, source)
110
107
111 class localrepository(object):
108 class localrepository(object):
112
109
113 supportedformats = set(('revlogv1', 'generaldelta'))
110 supportedformats = set(('revlogv1', 'generaldelta'))
114 supported = supportedformats | set(('store', 'fncache', 'shared',
111 supported = supportedformats | set(('store', 'fncache', 'shared',
115 'dotencode'))
112 'dotencode'))
116 openerreqs = set(('revlogv1', 'generaldelta'))
113 openerreqs = set(('revlogv1', 'generaldelta'))
117 requirements = ['revlogv1']
114 requirements = ['revlogv1']
118
115
119 def _baserequirements(self, create):
116 def _baserequirements(self, create):
120 return self.requirements[:]
117 return self.requirements[:]
121
118
122 def __init__(self, baseui, path=None, create=False):
119 def __init__(self, baseui, path=None, create=False):
123 self.wopener = scmutil.opener(path, expand=True)
120 self.wopener = scmutil.opener(path, expand=True)
124 self.wvfs = self.wopener
121 self.wvfs = self.wopener
125 self.root = self.wvfs.base
122 self.root = self.wvfs.base
126 self.path = self.wvfs.join(".hg")
123 self.path = self.wvfs.join(".hg")
127 self.origroot = path
124 self.origroot = path
128 self.auditor = scmutil.pathauditor(self.root, self._checknested)
125 self.auditor = scmutil.pathauditor(self.root, self._checknested)
129 self.opener = scmutil.opener(self.path)
126 self.opener = scmutil.opener(self.path)
130 self.vfs = self.opener
127 self.vfs = self.opener
131 self.baseui = baseui
128 self.baseui = baseui
132 self.ui = baseui.copy()
129 self.ui = baseui.copy()
133 # A list of callback to shape the phase if no data were found.
130 # A list of callback to shape the phase if no data were found.
134 # Callback are in the form: func(repo, roots) --> processed root.
131 # Callback are in the form: func(repo, roots) --> processed root.
135 # This list it to be filled by extension during repo setup
132 # This list it to be filled by extension during repo setup
136 self._phasedefaults = []
133 self._phasedefaults = []
137
134
138 try:
135 try:
139 self.ui.readconfig(self.join("hgrc"), self.root)
136 self.ui.readconfig(self.join("hgrc"), self.root)
140 extensions.loadall(self.ui)
137 extensions.loadall(self.ui)
141 except IOError:
138 except IOError:
142 pass
139 pass
143
140
144 if not self.vfs.isdir():
141 if not self.vfs.isdir():
145 if create:
142 if create:
146 if not self.wvfs.exists():
143 if not self.wvfs.exists():
147 self.wvfs.makedirs()
144 self.wvfs.makedirs()
148 self.vfs.makedir(notindexed=True)
145 self.vfs.makedir(notindexed=True)
149 requirements = self._baserequirements(create)
146 requirements = self._baserequirements(create)
150 if self.ui.configbool('format', 'usestore', True):
147 if self.ui.configbool('format', 'usestore', True):
151 self.vfs.mkdir("store")
148 self.vfs.mkdir("store")
152 requirements.append("store")
149 requirements.append("store")
153 if self.ui.configbool('format', 'usefncache', True):
150 if self.ui.configbool('format', 'usefncache', True):
154 requirements.append("fncache")
151 requirements.append("fncache")
155 if self.ui.configbool('format', 'dotencode', True):
152 if self.ui.configbool('format', 'dotencode', True):
156 requirements.append('dotencode')
153 requirements.append('dotencode')
157 # create an invalid changelog
154 # create an invalid changelog
158 self.vfs.append(
155 self.vfs.append(
159 "00changelog.i",
156 "00changelog.i",
160 '\0\0\0\2' # represents revlogv2
157 '\0\0\0\2' # represents revlogv2
161 ' dummy changelog to prevent using the old repo layout'
158 ' dummy changelog to prevent using the old repo layout'
162 )
159 )
163 if self.ui.configbool('format', 'generaldelta', False):
160 if self.ui.configbool('format', 'generaldelta', False):
164 requirements.append("generaldelta")
161 requirements.append("generaldelta")
165 requirements = set(requirements)
162 requirements = set(requirements)
166 else:
163 else:
167 raise error.RepoError(_("repository %s not found") % path)
164 raise error.RepoError(_("repository %s not found") % path)
168 elif create:
165 elif create:
169 raise error.RepoError(_("repository %s already exists") % path)
166 raise error.RepoError(_("repository %s already exists") % path)
170 else:
167 else:
171 try:
168 try:
172 requirements = scmutil.readrequires(self.vfs, self.supported)
169 requirements = scmutil.readrequires(self.vfs, self.supported)
173 except IOError, inst:
170 except IOError, inst:
174 if inst.errno != errno.ENOENT:
171 if inst.errno != errno.ENOENT:
175 raise
172 raise
176 requirements = set()
173 requirements = set()
177
174
178 self.sharedpath = self.path
175 self.sharedpath = self.path
179 try:
176 try:
180 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
177 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
181 if not os.path.exists(s):
178 if not os.path.exists(s):
182 raise error.RepoError(
179 raise error.RepoError(
183 _('.hg/sharedpath points to nonexistent directory %s') % s)
180 _('.hg/sharedpath points to nonexistent directory %s') % s)
184 self.sharedpath = s
181 self.sharedpath = s
185 except IOError, inst:
182 except IOError, inst:
186 if inst.errno != errno.ENOENT:
183 if inst.errno != errno.ENOENT:
187 raise
184 raise
188
185
189 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
186 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
190 self.spath = self.store.path
187 self.spath = self.store.path
191 self.sopener = self.store.opener
188 self.sopener = self.store.opener
192 self.svfs = self.sopener
189 self.svfs = self.sopener
193 self.sjoin = self.store.join
190 self.sjoin = self.store.join
194 self.opener.createmode = self.store.createmode
191 self.opener.createmode = self.store.createmode
195 self._applyrequirements(requirements)
192 self._applyrequirements(requirements)
196 if create:
193 if create:
197 self._writerequirements()
194 self._writerequirements()
198
195
199
196
200 self._branchcache = None
197 self._branchcache = None
201 self._branchcachetip = None
198 self._branchcachetip = None
202 self.filterpats = {}
199 self.filterpats = {}
203 self._datafilters = {}
200 self._datafilters = {}
204 self._transref = self._lockref = self._wlockref = None
201 self._transref = self._lockref = self._wlockref = None
205
202
206 # A cache for various files under .hg/ that tracks file changes,
203 # A cache for various files under .hg/ that tracks file changes,
207 # (used by the filecache decorator)
204 # (used by the filecache decorator)
208 #
205 #
209 # Maps a property name to its util.filecacheentry
206 # Maps a property name to its util.filecacheentry
210 self._filecache = {}
207 self._filecache = {}
211
208
212 def close(self):
209 def close(self):
213 pass
210 pass
214
211
215 def _restrictcapabilities(self, caps):
212 def _restrictcapabilities(self, caps):
216 return caps
213 return caps
217
214
218 def _applyrequirements(self, requirements):
215 def _applyrequirements(self, requirements):
219 self.requirements = requirements
216 self.requirements = requirements
220 self.sopener.options = dict((r, 1) for r in requirements
217 self.sopener.options = dict((r, 1) for r in requirements
221 if r in self.openerreqs)
218 if r in self.openerreqs)
222
219
223 def _writerequirements(self):
220 def _writerequirements(self):
224 reqfile = self.opener("requires", "w")
221 reqfile = self.opener("requires", "w")
225 for r in self.requirements:
222 for r in self.requirements:
226 reqfile.write("%s\n" % r)
223 reqfile.write("%s\n" % r)
227 reqfile.close()
224 reqfile.close()
228
225
229 def _checknested(self, path):
226 def _checknested(self, path):
230 """Determine if path is a legal nested repository."""
227 """Determine if path is a legal nested repository."""
231 if not path.startswith(self.root):
228 if not path.startswith(self.root):
232 return False
229 return False
233 subpath = path[len(self.root) + 1:]
230 subpath = path[len(self.root) + 1:]
234 normsubpath = util.pconvert(subpath)
231 normsubpath = util.pconvert(subpath)
235
232
236 # XXX: Checking against the current working copy is wrong in
233 # XXX: Checking against the current working copy is wrong in
237 # the sense that it can reject things like
234 # the sense that it can reject things like
238 #
235 #
239 # $ hg cat -r 10 sub/x.txt
236 # $ hg cat -r 10 sub/x.txt
240 #
237 #
241 # if sub/ is no longer a subrepository in the working copy
238 # if sub/ is no longer a subrepository in the working copy
242 # parent revision.
239 # parent revision.
243 #
240 #
244 # However, it can of course also allow things that would have
241 # However, it can of course also allow things that would have
245 # been rejected before, such as the above cat command if sub/
242 # been rejected before, such as the above cat command if sub/
246 # is a subrepository now, but was a normal directory before.
243 # is a subrepository now, but was a normal directory before.
247 # The old path auditor would have rejected by mistake since it
244 # The old path auditor would have rejected by mistake since it
248 # panics when it sees sub/.hg/.
245 # panics when it sees sub/.hg/.
249 #
246 #
250 # All in all, checking against the working copy seems sensible
247 # All in all, checking against the working copy seems sensible
251 # since we want to prevent access to nested repositories on
248 # since we want to prevent access to nested repositories on
252 # the filesystem *now*.
249 # the filesystem *now*.
253 ctx = self[None]
250 ctx = self[None]
254 parts = util.splitpath(subpath)
251 parts = util.splitpath(subpath)
255 while parts:
252 while parts:
256 prefix = '/'.join(parts)
253 prefix = '/'.join(parts)
257 if prefix in ctx.substate:
254 if prefix in ctx.substate:
258 if prefix == normsubpath:
255 if prefix == normsubpath:
259 return True
256 return True
260 else:
257 else:
261 sub = ctx.sub(prefix)
258 sub = ctx.sub(prefix)
262 return sub.checknested(subpath[len(prefix) + 1:])
259 return sub.checknested(subpath[len(prefix) + 1:])
263 else:
260 else:
264 parts.pop()
261 parts.pop()
265 return False
262 return False
266
263
267 def peer(self):
264 def peer(self):
268 return localpeer(self) # not cached to avoid reference cycle
265 return localpeer(self) # not cached to avoid reference cycle
269
266
270 @filecache('bookmarks')
267 @filecache('bookmarks')
271 def _bookmarks(self):
268 def _bookmarks(self):
272 return bookmarks.read(self)
269 return bookmarks.read(self)
273
270
274 @filecache('bookmarks.current')
271 @filecache('bookmarks.current')
275 def _bookmarkcurrent(self):
272 def _bookmarkcurrent(self):
276 return bookmarks.readcurrent(self)
273 return bookmarks.readcurrent(self)
277
274
278 def _writebookmarks(self, marks):
275 def _writebookmarks(self, marks):
279 bookmarks.write(self)
276 bookmarks.write(self)
280
277
281 def bookmarkheads(self, bookmark):
278 def bookmarkheads(self, bookmark):
282 name = bookmark.split('@', 1)[0]
279 name = bookmark.split('@', 1)[0]
283 heads = []
280 heads = []
284 for mark, n in self._bookmarks.iteritems():
281 for mark, n in self._bookmarks.iteritems():
285 if mark.split('@', 1)[0] == name:
282 if mark.split('@', 1)[0] == name:
286 heads.append(n)
283 heads.append(n)
287 return heads
284 return heads
288
285
289 @storecache('phaseroots')
286 @storecache('phaseroots')
290 def _phasecache(self):
287 def _phasecache(self):
291 return phases.phasecache(self, self._phasedefaults)
288 return phases.phasecache(self, self._phasedefaults)
292
289
293 @storecache('obsstore')
290 @storecache('obsstore')
294 def obsstore(self):
291 def obsstore(self):
295 store = obsolete.obsstore(self.sopener)
292 store = obsolete.obsstore(self.sopener)
296 return store
293 return store
297
294
298 @storecache('00changelog.i')
295 @storecache('00changelog.i')
299 def changelog(self):
296 def changelog(self):
300 c = changelog.changelog(self.sopener)
297 c = changelog.changelog(self.sopener)
301 if 'HG_PENDING' in os.environ:
298 if 'HG_PENDING' in os.environ:
302 p = os.environ['HG_PENDING']
299 p = os.environ['HG_PENDING']
303 if p.startswith(self.root):
300 if p.startswith(self.root):
304 c.readpending('00changelog.i.a')
301 c.readpending('00changelog.i.a')
305 return c
302 return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i
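
    # Illustrative usage (not in the original source), assuming 'repo' is a
    # localrepository instance:
    #   len(repo)             -> number of revisions in the changelog
    #   repo[0]               -> changectx for revision 0
    #   repo[None]            -> workingctx for the working directory
    #   'deadbeef' in repo    -> True if the revision can be looked up
    #   for rev in repo: ...  -> iterates revision numbers 0..len(repo)-1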

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
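
    # Illustrative usage (not in the original source): revs() returns
    # revision numbers while set() yields contexts; extra arguments are
    # escaped through revset.formatspec, e.g.:
    #   repo.revs('branch(%s) and not closed()', 'default')
    #   for ctx in repo.set('%ld', [0, 1, 2]): ...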

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be
        a string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
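
    # Illustrative usage (not in the original source): create a regular
    # (committed) tag on the current tip; passing None for user and date
    # falls back to the usual commit defaults.
    #   repo.tag('v1.0', repo['tip'].node(), 'Added tag v1.0', False,
    #            None, None)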

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
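
    # Illustrative note (not in the original source): the mapping always
    # includes the synthetic 'tip' entry added by _findtags(), alongside any
    # global (.hgtags) and local (.hg/localtags) tags, e.g.:
    #   repo.tags()  ->  {'tip': <node>, 'v1.0': <node>, ...}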

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt
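
    # Illustrative note (not in the original source): branchmap() maps each
    # branch name to all of its heads, while branchtags() keeps only the
    # tipmost open head per branch, e.g.:
    #   repo.branchmap()  ->  {'default': [node1, node2], 'stable': [node3]}
    #   repo.branchtags() ->  {'default': node2, 'stable': node3}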

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if not node in self:
                    raise ValueError('invalidating branch cache because node '+
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset
        of the missing heads, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code
            # somewhat recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are
            # not heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in
        # the branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        # Iterate over a copy of the keys, since entries may be deleted.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result
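
    # Illustrative note (not in the original source): known() answers one
    # boolean per queried node and deliberately reports secret-phase
    # changesets as unknown, so that discovery never exchanges them.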

    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works
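
    # Illustrative note (not in the original source): cancopy() reports
    # whether this repository can be cloned by copying its files directly;
    # statichttprepo overrides local() to return a false value, which makes
    # cancopy() false there as well.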

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records: the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)
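
    # Illustrative note (not in the original source): wread()/wwrite() run
    # working-directory data through user-configured filters; a hypothetical
    # hgrc setup might look like:
    #   [encode]
    #   *.txt = dos2unix
    #   [decode]
    #   *.txt = unix2dos
    # 'encode' filters apply when reading from the working directory,
    # 'decode' filters when writing back to it.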

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
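
    # Illustrative usage (not in the original source), mirroring the common
    # caller pattern; close() commits the transaction, while release()
    # aborts it if it was never closed:
    #   tr = repo.transaction('my-operation')
    #   try:
    #       ... write to the store ...
    #       tr.close()
    #   finally:
    #       tr.release()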

    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from
        # being invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
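
    # Illustrative usage (not in the original source), matching the pattern
    # recover() uses above:
    #   l = repo.lock()
    #   try:
    #       ... modify .hg/store ...
    #   finally:
    #       l.release()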

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
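
    # Illustrative note (not in the original source): when copy data is
    # recorded, the filelog revision carries metadata along these lines:
    #   meta = {'copy': 'foo', 'copyrev': '<40-hex filelog node>'}
    # with fparent1 set to nullid so readers know to consult the copy
    # source.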

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret
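
    # Illustrative sketch (not from the original file): calling commit()
    # from an extension, assuming the usual scmutil matcher helpers; the
    # file name and commit message are hypothetical.
    #
    #   from mercurial import scmutil
    #   m = scmutil.match(repo[None], ['foo.py'])
    #   node = repo.commit(text='fix foo', user='me <me@example.com>',
    #                      match=m)
    #   if node is None:
    #       ui.status('nothing changed\n')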

    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract
                # anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
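
    # Illustrative sketch (not from the original file): commitctx() can
    # also be fed an in-memory context built with context.memctx,
    # bypassing the working directory entirely; all names below are
    # hypothetical and the memctx/memfilectx signatures are assumed.
    #
    #   from mercurial import context
    #   def filectxfn(repo, memctx, path):
    #       return context.memfilectx(path, 'new contents\n')
    #   mctx = context.memctx(repo, (repo['.'].node(), None), 'message',
    #                         ['somefile'], filectxfn, user='me')
    #   newnode = repo.commitctx(mctx)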

    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheads cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()
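
    # Illustrative sketch (not from the original file): a strip-like
    # caller that already knows the candidate new heads can keep the
    # branchheads cache warm, while rollback lets it be rebuilt lazily;
    # 'newheads' is hypothetical.
    #
    #   repo.destroyed(newheadnodes=newheads)  # cache updated in place
    #   repo.destroyed()                       # cache invalidated instead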

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)
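
    # Illustrative sketch (not from the original file): walking the files
    # of a changeset with a glob matcher; pattern and revision are
    # hypothetical.
    #
    #   from mercurial import match as matchmod
    #   m = matchmod.match(repo.root, '', ['glob:**.py'])
    #   for f in repo.walk(m, node='tip'):
    #       ui.write(f + '\n')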

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or between a node and
        the working directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with the working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
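
    # Illustrative sketch (not from the original file): consuming the
    # 7-tuple assembled above (modified, added, removed, deleted,
    # unknown, ignored, clean, in that order).
    #
    #   st = repo.status(unknown=True, ignored=True, clean=True)
    #   modified, added, removed, deleted, unknown, ignored, clean = st
    #   for f in modified:
    #       ui.write('M %s\n' % f)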

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads
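
    # Illustrative sketch (not from the original file): per the docstring,
    # heads come back newest first, so the tip-most open head of a named
    # branch (hypothetically 'stable') is the first element, when any.
    #
    #   bheads = repo.branchheads('stable', closed=False)
    #   if bheads:
    #       newest = repo[bheads[0]]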

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
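
    # Illustrative note (not from the original file): between() samples
    # first-parent ancestors of each 'top' at exponentially growing
    # distances (1, 2, 4, 8, ...), which is what the legacy wire-protocol
    # discovery uses to bisect toward the common subset. For a linear
    # history n0..n9:
    #
    #   repo.between([(n9, n0)])  =>  [[n8, n7, n5, n1]]
    #   # i.e. the ancestors 1, 2, 4 and 8 steps below n9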

    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            remoteobs = remote.listkeys('obsolete')
            if 'dump' in remoteobs:
                if tr is None:
                    tr = self.transaction(trname)
                data = base85.b85decode(remoteobs['dump'])
                self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result
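
    # Illustrative sketch (not from the original file): pulling from a
    # peer, mirroring what commands.pull does; the URL is hypothetical.
    #
    #   from mercurial import hg
    #   other = hg.peer(repo, {}, 'http://example.com/repo')
    #   result = repo.pull(other, heads=None, force=False)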

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass
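
    # Illustrative sketch (not from the original file): an extension
    # vetoing pushes by subclassing the repo in reposetup(), as the
    # docstring above invites; the policy shown is hypothetical and the
    # usual util/_ imports are assumed.
    #
    #   def reposetup(ui, repo):
    #       class checkedrepo(repo.__class__):
    #           def checkpush(self, force, revs):
    #               super(checkedrepo, self).checkpush(force, revs)
    #               if not force and revs is None:
    #                   raise util.Abort(_('push of all heads forbidden, '
    #                                      'use -r'))
    #       repo.__class__ = checkedrepo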

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete
                        # then, save the iteration
                        if self.obsstore:
                            # these messages are here for the 80-char limit
                            mso = _("push includes an obsolete changeset: %s!")
                            msu = _("push includes an unstable changeset: %s!")
                            # If we are to push and there is at least one
                            # obsolete or unstable changeset in missing, at
                            # least one of the missingheads will be obsolete
                            # or unstable. So checking heads only is ok
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    raise util.Abort(_(mso) % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(_(msu) % ctx)
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. Synchronize on all common.
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads) )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads,
                                                     remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote that are
                    # public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                if ('obsolete' in remote.listkeys('namespaces')
                    and self.obsstore):
                    data = self.listkeys('obsolete')['dump']
                    r = remote.pushkey('obsolete', 'dump', '', data)
                    if not r:
                        self.ui.warn(_('failed to push obsolete markers!\n'))
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
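
    # Illustrative sketch (not from the original file): interpreting
    # push()'s return value per its docstring; 'other' is a peer as in
    # the pull example above.
    #
    #   ret = repo.push(other, newbranch=True)
    #   if ret is None:
    #       ui.status('nothing to push\n')
    #   elif ret == 0:
    #       ui.warn('push failed (HTTP error)\n')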

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))
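
    # Illustrative sketch (not from the original file): with heads=None
    # and common=None, getbundle() yields everything, so writing a full
    # backup bundle reduces to the following; the file name and bundle
    # type are hypothetical.
    #
    #   cg = repo.getbundle('bundle')
    #   changegroup.writebundle(cg, 'backup.hg', 'HG10BZ')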
2029
2026
2030 def _changegroupsubset(self, commonrevs, csets, heads, source):
2027 def _changegroupsubset(self, commonrevs, csets, heads, source):
2031
2028
2032 cl = self.changelog
2029 cl = self.changelog
2033 mf = self.manifest
2030 mf = self.manifest
2034 mfs = {} # needed manifests
2031 mfs = {} # needed manifests
2035 fnodes = {} # needed file nodes
2032 fnodes = {} # needed file nodes
2036 changedfiles = set()
2033 changedfiles = set()
2037 fstate = ['', {}]
2034 fstate = ['', {}]
2038 count = [0, 0]
2035 count = [0, 0]
2039
2036
2040 # can we go through the fast path ?
2037 # can we go through the fast path ?
2041 heads.sort()
2038 heads.sort()
2042 if heads == sorted(self.heads()):
2039 if heads == sorted(self.heads()):
2043 return self._changegroup(csets, source)
2040 return self._changegroup(csets, source)
2044
2041
2045 # slow path
2042 # slow path
2046 self.hook('preoutgoing', throw=True, source=source)
2043 self.hook('preoutgoing', throw=True, source=source)
2047 self.changegroupinfo(csets, source)
2044 self.changegroupinfo(csets, source)
2048
2045
2049 # filter any nodes that claim to be part of the known set
2046 # filter any nodes that claim to be part of the known set
2050 def prune(revlog, missing):
2047 def prune(revlog, missing):
2051 rr, rl = revlog.rev, revlog.linkrev
2048 rr, rl = revlog.rev, revlog.linkrev
2052 return [n for n in missing
2049 return [n for n in missing
2053 if rl(rr(n)) not in commonrevs]
2050 if rl(rr(n)) not in commonrevs]
2054
2051
2055 progress = self.ui.progress
2052 progress = self.ui.progress
2056 _bundling = _('bundling')
2053 _bundling = _('bundling')
2057 _changesets = _('changesets')
2054 _changesets = _('changesets')
2058 _manifests = _('manifests')
2055 _manifests = _('manifests')
2059 _files = _('files')
2056 _files = _('files')
2060
2057
2061 def lookup(revlog, x):
2058 def lookup(revlog, x):
2062 if revlog == cl:
2059 if revlog == cl:
2063 c = cl.read(x)
2060 c = cl.read(x)
2064 changedfiles.update(c[3])
2061 changedfiles.update(c[3])
2065 mfs.setdefault(c[0], x)
2062 mfs.setdefault(c[0], x)
2066 count[0] += 1
2063 count[0] += 1
2067 progress(_bundling, count[0],
2064 progress(_bundling, count[0],
2068 unit=_changesets, total=count[1])
2065 unit=_changesets, total=count[1])
2069 return x
2066 return x
2070 elif revlog == mf:
2067 elif revlog == mf:
2071 clnode = mfs[x]
2068 clnode = mfs[x]
2072 mdata = mf.readfast(x)
2069 mdata = mf.readfast(x)
2073 for f, n in mdata.iteritems():
2070 for f, n in mdata.iteritems():
2074 if f in changedfiles:
2071 if f in changedfiles:
2075 fnodes[f].setdefault(n, clnode)
2072 fnodes[f].setdefault(n, clnode)
2076 count[0] += 1
2073 count[0] += 1
2077 progress(_bundling, count[0],
2074 progress(_bundling, count[0],
2078 unit=_manifests, total=count[1])
2075 unit=_manifests, total=count[1])
2079 return clnode
2076 return clnode
2080 else:
2077 else:
2081 progress(_bundling, count[0], item=fstate[0],
2078 progress(_bundling, count[0], item=fstate[0],
2082 unit=_files, total=count[1])
2079 unit=_files, total=count[1])
2083 return fstate[1][x]
2080 return fstate[1][x]
2084
2081
2085 bundler = changegroup.bundle10(lookup)
2082 bundler = changegroup.bundle10(lookup)
2086 reorder = self.ui.config('bundle', 'reorder', 'auto')
2083 reorder = self.ui.config('bundle', 'reorder', 'auto')
2087 if reorder == 'auto':
2084 if reorder == 'auto':
2088 reorder = None
2085 reorder = None
2089 else:
2086 else:
2090 reorder = util.parsebool(reorder)
2087 reorder = util.parsebool(reorder)
2091
2088
2092 def gengroup():
2089 def gengroup():
2093 # Create a changenode group generator that will call our functions
2090 # Create a changenode group generator that will call our functions
2094 # back to lookup the owning changenode and collect information.
2091 # back to lookup the owning changenode and collect information.
2095 count[:] = [0, len(csets)]
2092 count[:] = [0, len(csets)]
2096 for chunk in cl.group(csets, bundler, reorder=reorder):
2093 for chunk in cl.group(csets, bundler, reorder=reorder):
2097 yield chunk
2094 yield chunk
2098 progress(_bundling, None)
2095 progress(_bundling, None)
2099
2096
2100 # Create a generator for the manifestnodes that calls our lookup
2097 # Create a generator for the manifestnodes that calls our lookup
2101 # and data collection functions back.
2098 # and data collection functions back.
2102 for f in changedfiles:
2099 for f in changedfiles:
2103 fnodes[f] = {}
2100 fnodes[f] = {}
2104 count[:] = [0, len(mfs)]
2101 count[:] = [0, len(mfs)]
2105 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2102 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2106 yield chunk
2103 yield chunk
2107 progress(_bundling, None)
2104 progress(_bundling, None)
2108
2105
2109 mfs.clear()
2106 mfs.clear()
2110
2107
2111 # Go through all our files in order sorted by name.
2108 # Go through all our files in order sorted by name.
2112 count[:] = [0, len(changedfiles)]
2109 count[:] = [0, len(changedfiles)]
2113 for fname in sorted(changedfiles):
2110 for fname in sorted(changedfiles):
2114 filerevlog = self.file(fname)
2111 filerevlog = self.file(fname)
2115 if not len(filerevlog):
2112 if not len(filerevlog):
2116 raise util.Abort(_("empty or missing revlog for %s")
2113 raise util.Abort(_("empty or missing revlog for %s")
2117 % fname)
2114 % fname)
2118 fstate[0] = fname
2115 fstate[0] = fname
2119 fstate[1] = fnodes.pop(fname, {})
2116 fstate[1] = fnodes.pop(fname, {})
2120
2117
2121 nodelist = prune(filerevlog, fstate[1])
2118 nodelist = prune(filerevlog, fstate[1])
2122 if nodelist:
2119 if nodelist:
2123 count[0] += 1
2120 count[0] += 1
2124 yield bundler.fileheader(fname)
2121 yield bundler.fileheader(fname)
2125 for chunk in filerevlog.group(nodelist, bundler, reorder):
2122 for chunk in filerevlog.group(nodelist, bundler, reorder):
2126 yield chunk
2123 yield chunk
2127
2124
2128 # Signal that no more groups are left.
2125 # Signal that no more groups are left.
2129 yield bundler.close()
2126 yield bundler.close()
2130 progress(_bundling, None)
2127 progress(_bundling, None)
2131
2128
2132 if csets:
2129 if csets:
2133 self.hook('outgoing', node=hex(csets[0]), source=source)
2130 self.hook('outgoing', node=hex(csets[0]), source=source)
2134
2131
2135 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2132 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2136
2133
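An aside on the shape of gengroup() above: the bundle is streamed in three passes (changesets, then manifests, then file revlogs sorted by name), terminated by bundler.close(). A minimal sketch of that ordering, with plain tuples standing in for the real revlog/bundler chunk API (bundle_passes and its arguments are illustrative names, not Mercurial APIs):

    # Illustrative only: real chunks come from revlog.group() and a
    # changegroup.bundle10 bundler, not from plain lists and dicts.
    def bundle_passes(csets, manifests, filenodes):
        for node in csets:               # pass 1: changelog entries
            yield ('changeset', node)
        for node in manifests:           # pass 2: manifest entries
            yield ('manifest', node)
        for fname in sorted(filenodes):  # pass 3: files, sorted by name
            yield ('fileheader', fname)
            for node in filenodes[fname]:
                yield ('filenode', node)
        yield ('close', None)            # signal that no more groups are left
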
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

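_changegroup can take this simpler route because the recipient is assumed to have every changenode not being sent: gennodelst() just keeps the revisions of a revlog whose linkrev falls inside the outgoing changeset set. A toy model of that filter, with the revlog faked as (node, linkrev) pairs (a real revlog is indexed by revision number and exposes node()/linkrev() accessors instead):

    def gennodelst_toy(log, revset):
        # keep only revisions linked to an outgoing changeset
        return [node for node, linkrev in log if linkrev in revset]

    # only revisions whose linkrev is in the outgoing set are bundled:
    assert gennodelst_toy([('a', 1), ('b', 2), ('c', 3)],
                          set([2, 3])) == ['b', 'c']
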
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret.
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

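The return value packs the head-count delta into a single integer, per the docstring; 0 is reserved for "nothing happened", so a successful pull that leaves the head count unchanged still returns 1. A small decoder a caller might write (an illustrative helper, not part of the repo API):

    def describe_addchangegroup(ret):
        # follows the contract documented on addchangegroup() above
        if ret == 0:
            return 'nothing changed or no source'
        if ret == 1:
            return 'head count unchanged'
        if ret > 1:
            return '%d heads added' % (ret - 1)
        return '%d heads removed' % (-ret - 1)

    assert describe_addchangegroup(3) == '2 heads added'
    assert describe_addchangegroup(-2) == '1 heads removed'
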
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

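Reading stream_in() back, the wire format it consumes appears to be (inferred from the parsing above, not a formal spec): a status line (0 = OK, 1 = operation forbidden, 2 = remote lock failed), a '<total_files> <total_bytes>' line, then for each file a '<name>\0<size>' header line followed by exactly size bytes of raw store data. The per-file header parse, in isolation:

    def parse_stream_header(line):
        # '<name>\0<size>\n' -> (store path, payload length in bytes)
        name, size = line.rstrip('\n').split('\0', 1)
        return name, int(size)

    # hypothetical store entry, just to show the framing:
    assert parse_stream_header('data/foo.i\x00123\n') == ('data/foo.i', 123)
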
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

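The negotiation above tries, in order: an explicit server preference ('stream-preferred'), the legacy 'stream' capability (which implies revlogv1), then 'streamreqs' with a comma-separated format list that must be a subset of our supported formats, and finally falls back to pull. Restated as a standalone sketch, with capabilities as a plain dict (which is not how a real peer exposes them):

    def negotiate_clone(caps, supportedformats, heads, stream):
        # caps: e.g. {'stream-preferred': True, 'streamreqs': 'revlogv1'}
        if not stream:
            stream = 'stream-preferred' in caps
        if stream and not heads:
            if 'stream' in caps:
                return 'stream_in', set(['revlogv1'])
            streamreqs = caps.get('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                if not streamreqs - supportedformats:
                    return 'stream_in', streamreqs
        return 'pull', None

    assert negotiate_clone({'streamreqs': 'revlogv1'}, set(['revlogv1']),
                           [], True) == ('stream_in', set(['revlogv1']))
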
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

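pushkey() and listkeys() wrap the generic pushkey namespaces (bookmarks and phases among them in this release) in pre/post hooks. A hypothetical usage snippet against an existing repo object; the exact key/value encodings belong to each namespace, so treat the values below as illustrative:

    # list the bookmark namespace, then move a bookmark via pushkey;
    # bookmark values are hex changeset ids, '' meaning "not set yet"
    marks = repo.listkeys('bookmarks')          # e.g. {'mybook': '0f3a...'}
    ok = repo.pushkey('bookmarks', 'mybook',
                      marks.get('mybook', ''),  # expected old value
                      newnode)                  # new value (hex node)
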
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

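aftertrans() returns a plain closure rather than a bound method so the transaction does not keep the repo alive through a reference cycle (per its comment), and undoname() simply maps a journal file to its undo counterpart, replacing only the first occurrence of 'journal' in the basename:

    # given the undoname() definition above:
    assert undoname('.hg/store/journal') == '.hg/store/undo'
    assert undoname('.hg/journal.dirstate') == '.hg/undo.dirstate'
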
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

@@ -1,52 +1,49 @@
# peer.py - repository base classes for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
import error

class peerrepository(object):

    def capable(self, name):
        '''tell whether repo supports named capability.
        return False if not supported.
        if boolean capability, return True.
        if string capability, return string.'''
        caps = self._capabilities()
        if name in caps:
            return True
        name_eq = name + '='
        for cap in caps:
            if cap.startswith(name_eq):
                return cap[len(name_eq):]
        return False

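capable() folds boolean and valued capabilities into a single return type, per its docstring. A short demo, assuming a server that advertises the two capability strings below (the strings themselves are plausible examples, not a fixed list):

    class demopeer(peerrepository):
        def _capabilities(self):
            # capability strings as a server might advertise them
            return ['branchmap', 'streamreqs=generaldelta,revlogv1']

    p = demopeer()
    assert p.capable('branchmap') is True
    assert p.capable('streamreqs') == 'generaldelta,revlogv1'
    assert p.capable('nosuchthing') is False
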
    def requirecap(self, name, purpose):
        '''raise an exception if the given capability is not present'''
        if not self.capable(name):
            raise error.CapabilityError(
                _('cannot %s; remote repository does not '
                  'support the %r capability') % (purpose, name))

    def local(self):
        '''return peer as a localrepo, or None'''
        return None

    def peer(self):
        return self

-    def cancopy(self):
-        return False
-
    def canpush(self):
        return True

    def close(self):
        pass