branchcache: fetch source branchcache during clone (issue3378)...
Tomasz Kleczek
r17740:e6067bec default
@@ -1,600 +1,610 @@
 # hg.py - repository classes for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from i18n import _
 from lock import release
 from node import hex, nullid
 import localrepo, bundlerepo, httppeer, sshpeer, statichttprepo, bookmarks
 import lock, util, extensions, error, node, scmutil, phases
 import cmdutil, discovery
 import merge as mergemod
 import verify as verifymod
 import errno, os, shutil
 
 def _local(path):
     path = util.expandpath(util.urllocalpath(path))
     return (os.path.isfile(path) and bundlerepo or localrepo)
 
 def addbranchrevs(lrepo, other, branches, revs):
     peer = other.peer() # a courtesy to callers using a localrepo for other
     hashbranch, branches = branches
     if not hashbranch and not branches:
         return revs or None, revs and revs[0] or None
     revs = revs and list(revs) or []
     if not peer.capable('branchmap'):
         if branches:
             raise util.Abort(_("remote branch lookup not supported"))
         revs.append(hashbranch)
         return revs, revs[0]
     branchmap = peer.branchmap()
 
     def primary(branch):
         if branch == '.':
             if not lrepo:
                 raise util.Abort(_("dirstate branch not accessible"))
             branch = lrepo.dirstate.branch()
         if branch in branchmap:
             revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
             return True
         else:
             return False
 
     for branch in branches:
         if not primary(branch):
             raise error.RepoLookupError(_("unknown branch '%s'") % branch)
     if hashbranch:
         if not primary(hashbranch):
             revs.append(hashbranch)
     return revs, revs[0]
 
 def parseurl(path, branches=None):
     '''parse url#branch, returning (url, (branch, branches))'''
 
     u = util.url(path)
     branch = None
     if u.fragment:
         branch = u.fragment
         u.fragment = None
     return str(u), (branch, branches or [])
 
 schemes = {
     'bundle': bundlerepo,
     'file': _local,
     'http': httppeer,
     'https': httppeer,
     'ssh': sshpeer,
     'static-http': statichttprepo,
 }
 
 def _peerlookup(path):
     u = util.url(path)
     scheme = u.scheme or 'file'
     thing = schemes.get(scheme) or schemes['file']
     try:
         return thing(path)
     except TypeError:
         return thing
 
 def islocal(repo):
     '''return true if repo or path is local'''
     if isinstance(repo, str):
         try:
             return _peerlookup(repo).islocal(repo)
         except AttributeError:
             return False
     return repo.local()
 
 def _peerorrepo(ui, path, create=False):
     """return a repository object for the specified path"""
     obj = _peerlookup(path).instance(ui, path, create)
     ui = getattr(obj, "ui", ui)
     for name, module in extensions.extensions():
         hook = getattr(module, 'reposetup', None)
         if hook:
             hook(ui, obj)
     return obj
 
 def repository(ui, path='', create=False):
     """return a repository object for the specified path"""
     peer = _peerorrepo(ui, path, create)
     repo = peer.local()
     if not repo:
         raise util.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
     return repo
 
 def peer(uiorrepo, opts, path, create=False):
     '''return a repository peer for the specified path'''
     rui = remoteui(uiorrepo, opts)
     return _peerorrepo(rui, path, create).peer()
 
 def defaultdest(source):
     '''return default destination of clone if none is given'''
     return os.path.basename(os.path.normpath(source))
 
 def share(ui, source, dest=None, update=True):
     '''create a shared repository'''
 
     if not islocal(source):
         raise util.Abort(_('can only share local repositories'))
 
     if not dest:
         dest = defaultdest(source)
     else:
         dest = ui.expandpath(dest)
 
     if isinstance(source, str):
         origsource = ui.expandpath(source)
         source, branches = parseurl(origsource)
         srcrepo = repository(ui, source)
         rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
     else:
         srcrepo = source.local()
         origsource = source = srcrepo.url()
         checkout = None
 
     sharedpath = srcrepo.sharedpath # if our source is already sharing
 
     root = os.path.realpath(dest)
     roothg = os.path.join(root, '.hg')
 
     if os.path.exists(roothg):
         raise util.Abort(_('destination already exists'))
 
     if not os.path.isdir(root):
         os.mkdir(root)
     util.makedir(roothg, notindexed=True)
 
     requirements = ''
     try:
         requirements = srcrepo.opener.read('requires')
     except IOError, inst:
         if inst.errno != errno.ENOENT:
             raise
 
     requirements += 'shared\n'
     util.writefile(os.path.join(roothg, 'requires'), requirements)
     util.writefile(os.path.join(roothg, 'sharedpath'), sharedpath)
 
     r = repository(ui, root)
 
     default = srcrepo.ui.config('paths', 'default')
     if default:
         fp = r.opener("hgrc", "w", text=True)
         fp.write("[paths]\n")
         fp.write("default = %s\n" % default)
         fp.close()
 
     if update:
         r.ui.status(_("updating working directory\n"))
         if update is not True:
             checkout = update
         for test in (checkout, 'default', 'tip'):
             if test is None:
                 continue
             try:
                 uprev = r.lookup(test)
                 break
             except error.RepoLookupError:
                 continue
         _update(r, uprev)
 
 def copystore(ui, srcrepo, destpath):
     '''copy files from store of srcrepo to destpath
 
     returns destlock
     '''
     destlock = None
     try:
         hardlink = None
         num = 0
         srcpublishing = srcrepo.ui.configbool('phases', 'publish', True)
         for f in srcrepo.store.copylist():
             if srcpublishing and f.endswith('phaseroots'):
                 continue
             src = os.path.join(srcrepo.sharedpath, f)
             dst = os.path.join(destpath, f)
             dstbase = os.path.dirname(dst)
             if dstbase and not os.path.exists(dstbase):
                 os.mkdir(dstbase)
             if os.path.exists(src):
                 if dst.endswith('data'):
                     # lock to avoid premature writing to the target
                     destlock = lock.lock(os.path.join(dstbase, "lock"))
                 hardlink, n = util.copyfiles(src, dst, hardlink)
                 num += n
         if hardlink:
             ui.debug("linked %d files\n" % num)
         else:
             ui.debug("copied %d files\n" % num)
         return destlock
     except: # re-raises
         release(destlock)
         raise
 
 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
           update=True, stream=False, branch=None):
     """Make a copy of an existing repository.
 
     Create a copy of an existing repository in a new directory. The
     source and destination are URLs, as passed to the repository
     function. Returns a pair of repository peers, the source and
     newly created destination.
 
     The location of the source is added to the new repository's
     .hg/hgrc file, as the default to be used for future pulls and
     pushes.
 
     If an exception is raised, the partly cloned/updated destination
     repository will be deleted.
 
     Arguments:
 
     source: repository object or URL
 
     dest: URL of destination repository to create (defaults to base
     name of source repository)
 
     pull: always pull from source repository, even in local case
 
     stream: stream raw data uncompressed from repository (fast over
     LAN, slow over WAN)
 
     rev: revision to clone up to (implies pull=True)
 
     update: update working directory after clone completes, if
     destination is local repository (True means update to default rev,
     anything else is treated as a revision)
 
     branch: branches to clone
     """
 
     if isinstance(source, str):
         origsource = ui.expandpath(source)
         source, branch = parseurl(origsource, branch)
         srcpeer = peer(ui, peeropts, source)
     else:
         srcpeer = source.peer() # in case we were called with a localrepo
         branch = (None, branch or [])
         origsource = source = srcpeer.url()
     rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
 
     if dest is None:
         dest = defaultdest(source)
         ui.status(_("destination directory: %s\n") % dest)
     else:
         dest = ui.expandpath(dest)
 
     dest = util.urllocalpath(dest)
     source = util.urllocalpath(source)
 
     if not dest:
         raise util.Abort(_("empty destination path is not valid"))
     if os.path.exists(dest):
         if not os.path.isdir(dest):
             raise util.Abort(_("destination '%s' already exists") % dest)
         elif os.listdir(dest):
             raise util.Abort(_("destination '%s' is not empty") % dest)
 
     class DirCleanup(object):
         def __init__(self, dir_):
             self.rmtree = shutil.rmtree
             self.dir_ = dir_
         def close(self):
             self.dir_ = None
         def cleanup(self):
             if self.dir_:
                 self.rmtree(self.dir_, True)
 
     srclock = destlock = dircleanup = None
     srcrepo = srcpeer.local()
     try:
         abspath = origsource
         if islocal(origsource):
             abspath = os.path.abspath(util.urllocalpath(origsource))
 
         if islocal(dest):
             dircleanup = DirCleanup(dest)
 
         copy = False
         if (srcrepo and srcrepo.cancopy() and islocal(dest)
             and not phases.hassecret(srcrepo)):
             copy = not pull and not rev
 
         if copy:
             try:
                 # we use a lock here because if we race with commit, we
                 # can end up with extra data in the cloned revlogs that's
                 # not pointed to by changesets, thus causing verify to
                 # fail
                 srclock = srcrepo.lock(wait=False)
             except error.LockError:
                 copy = False
 
         if copy:
             srcrepo.hook('preoutgoing', throw=True, source='clone')
             hgdir = os.path.realpath(os.path.join(dest, ".hg"))
             if not os.path.exists(dest):
                 os.mkdir(dest)
             else:
                 # only clean up directories we create ourselves
                 dircleanup.dir_ = hgdir
             try:
                 destpath = hgdir
                 util.makedir(destpath, notindexed=True)
             except OSError, inst:
                 if inst.errno == errno.EEXIST:
                     dircleanup.close()
                     raise util.Abort(_("destination '%s' already exists")
                                      % dest)
                 raise
 
             destlock = copystore(ui, srcrepo, destpath)
 
+            # Recomputing branch cache might be slow on big repos,
+            # so just copy it
+            dstcachedir = os.path.join(destpath, 'cache')
+            srcbranchcache = srcrepo.sjoin('cache/branchheads')
+            dstbranchcache = os.path.join(dstcachedir, 'branchheads')
+            if os.path.exists(srcbranchcache):
+                if not os.path.exists(dstcachedir):
+                    os.mkdir(dstcachedir)
+                util.copyfile(srcbranchcache, dstbranchcache)
+
             # we need to re-init the repo after manually copying the data
             # into it
             destpeer = peer(ui, peeropts, dest)
             srcrepo.hook('outgoing', source='clone',
                          node=node.hex(node.nullid))
         else:
             try:
                 destpeer = peer(ui, peeropts, dest, create=True)
             except OSError, inst:
                 if inst.errno == errno.EEXIST:
                     dircleanup.close()
                     raise util.Abort(_("destination '%s' already exists")
                                      % dest)
                 raise
 
             revs = None
             if rev:
                 if not srcpeer.capable('lookup'):
                     raise util.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))
                 revs = [srcpeer.lookup(r) for r in rev]
                 checkout = revs[0]
             if destpeer.local():
                 destpeer.local().clone(srcpeer, heads=revs, stream=stream)
             elif srcrepo:
                 srcrepo.push(destpeer, revs=revs)
             else:
                 raise util.Abort(_("clone from remote to remote not supported"))
 
         if dircleanup:
             dircleanup.close()
 
         # clone all bookmarks except divergent ones
         destrepo = destpeer.local()
         if destrepo and srcpeer.capable("pushkey"):
             rb = srcpeer.listkeys('bookmarks')
             for k, n in rb.iteritems():
                 try:
                     m = destrepo.lookup(n)
                     destrepo._bookmarks[k] = m
                 except error.RepoLookupError:
                     pass
             if rb:
                 bookmarks.write(destrepo)
         elif srcrepo and destpeer.capable("pushkey"):
             for k, n in srcrepo._bookmarks.iteritems():
                 destpeer.pushkey('bookmarks', k, '', hex(n))
 
         if destrepo:
             fp = destrepo.opener("hgrc", "w", text=True)
             fp.write("[paths]\n")
             u = util.url(abspath)
             u.passwd = None
             defaulturl = str(u)
             fp.write("default = %s\n" % defaulturl)
             fp.close()
 
             destrepo.ui.setconfig('paths', 'default', defaulturl)
 
             if update:
                 if update is not True:
                     checkout = srcpeer.lookup(update)
                 for test in (checkout, 'default', 'tip'):
                     if test is None:
                         continue
                     try:
                         uprev = destrepo.lookup(test)
                         break
                     except error.RepoLookupError:
                         continue
                 bn = destrepo[uprev].branch()
                 destrepo.ui.status(_("updating to branch %s\n") % bn)
                 _update(destrepo, uprev)
                 if update in destrepo._bookmarks:
                     bookmarks.setcurrent(destrepo, update)
 
         return srcpeer, destpeer
     finally:
         release(srclock, destlock)
         if dircleanup is not None:
             dircleanup.cleanup()
         if srcpeer is not None:
             srcpeer.close()
 
 def _showstats(repo, stats):
     repo.ui.status(_("%d files updated, %d files merged, "
                      "%d files removed, %d files unresolved\n") % stats)
 
 def update(repo, node):
     """update the working directory to node, merging linear changes"""
     stats = mergemod.update(repo, node, False, False, None)
     _showstats(repo, stats)
     if stats[3]:
         repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
     return stats[3] > 0
 
 # naming conflict in clone()
 _update = update
 
 def clean(repo, node, show_stats=True):
     """forcibly switch the working directory to node, clobbering changes"""
     stats = mergemod.update(repo, node, False, True, None)
     if show_stats:
         _showstats(repo, stats)
     return stats[3] > 0
 
 def merge(repo, node, force=None, remind=True):
     """Branch merge with node, resolving changes. Return true if any
     unresolved conflicts."""
     stats = mergemod.update(repo, node, True, force, False)
     _showstats(repo, stats)
     if stats[3]:
         repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                          "or 'hg update -C .' to abandon\n"))
     elif remind:
         repo.ui.status(_("(branch merge, don't forget to commit)\n"))
     return stats[3] > 0
 
 def _incoming(displaychlist, subreporecurse, ui, repo, source,
               opts, buffered=False):
     """
     Helper for incoming / gincoming.
     displaychlist gets called with
         (remoterepo, incomingchangesetlist, displayer) parameters,
     and is supposed to contain only code that can't be unified.
     """
     source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
     other = peer(repo, opts, source)
     ui.status(_('comparing with %s\n') % util.hidepassword(source))
     revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
 
     if revs:
         revs = [other.lookup(rev) for rev in revs]
     other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                 revs, opts["bundle"], opts["force"])
     try:
         if not chlist:
             ui.status(_("no changes found\n"))
             return subreporecurse()
 
         displayer = cmdutil.show_changeset(ui, other, opts, buffered)
 
         # XXX once graphlog extension makes it into core,
         # should be replaced by an if graph/else
         displaychlist(other, chlist, displayer)
 
         displayer.close()
     finally:
         cleanupfn()
     subreporecurse()
     return 0 # exit code is zero since we found incoming changes
 
 def incoming(ui, repo, source, opts):
     def subreporecurse():
         ret = 1
         if opts.get('subrepos'):
             ctx = repo[None]
             for subpath in sorted(ctx.substate):
                 sub = ctx.sub(subpath)
                 ret = min(ret, sub.incoming(ui, source, opts))
         return ret
 
     def display(other, chlist, displayer):
         limit = cmdutil.loglimit(opts)
         if opts.get('newest_first'):
             chlist.reverse()
         count = 0
         for n in chlist:
             if limit is not None and count >= limit:
                 break
             parents = [p for p in other.changelog.parents(n) if p != nullid]
             if opts.get('no_merges') and len(parents) == 2:
                 continue
             count += 1
             displayer.show(other[n])
     return _incoming(display, subreporecurse, ui, repo, source, opts)
 
 def _outgoing(ui, repo, dest, opts):
     dest = ui.expandpath(dest or 'default-push', dest or 'default')
     dest, branches = parseurl(dest, opts.get('branch'))
     ui.status(_('comparing with %s\n') % util.hidepassword(dest))
     revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
     if revs:
         revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
 
     other = peer(repo, opts, dest)
     outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                             force=opts.get('force'))
     o = outgoing.missing
     if not o:
         scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
         return None
     return o
 
 def outgoing(ui, repo, dest, opts):
     def recurse():
         ret = 1
         if opts.get('subrepos'):
             ctx = repo[None]
             for subpath in sorted(ctx.substate):
                 sub = ctx.sub(subpath)
                 ret = min(ret, sub.outgoing(ui, dest, opts))
         return ret
 
     limit = cmdutil.loglimit(opts)
     o = _outgoing(ui, repo, dest, opts)
     if o is None:
         return recurse()
 
     if opts.get('newest_first'):
         o.reverse()
     displayer = cmdutil.show_changeset(ui, repo, opts)
     count = 0
     for n in o:
         if limit is not None and count >= limit:
             break
         parents = [p for p in repo.changelog.parents(n) if p != nullid]
         if opts.get('no_merges') and len(parents) == 2:
             continue
         count += 1
         displayer.show(repo[n])
     displayer.close()
     recurse()
     return 0 # exit code is zero since we found outgoing changes
 
 def revert(repo, node, choose):
     """revert changes to revision in node without updating dirstate"""
     return mergemod.update(repo, node, False, True, choose)[3] > 0
 
 def verify(repo):
     """verify the consistency of a repository"""
     return verifymod.verify(repo)
 
 def remoteui(src, opts):
     'build a remote ui from ui or repo and opts'
     if util.safehasattr(src, 'baseui'): # looks like a repository
         dst = src.baseui.copy() # drop repo-specific config
         src = src.ui # copy target options from repo
     else: # assume it's a global ui object
         dst = src.copy() # keep all global options
 
     # copy ssh-specific options
     for o in 'ssh', 'remotecmd':
         v = opts.get(o) or src.config('ui', o)
         if v:
             dst.setconfig("ui", o, v)
 
     # copy bundle-specific options
     r = src.config('bundle', 'mainreporoot')
     if r:
         dst.setconfig('bundle', 'mainreporoot', r)
 
     # copy selected local settings to the remote ui
     for sect in ('auth', 'hostfingerprints', 'http_proxy'):
         for key, val in src.configitems(sect):
             dst.setconfig(sect, key, val)
     v = src.config('web', 'cacerts')
     if v:
         dst.setconfig('web', 'cacerts', util.expandpath(v))
 
     return dst
@@ -1,2620 +1,2637 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import bin, hex, nullid, nullrev, short
7 from node import bin, hex, nullid, nullrev, short
8 from i18n import _
8 from i18n import _
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import lock, transaction, store, encoding, base85
11 import lock, transaction, store, encoding, base85
12 import scmutil, util, extensions, hook, error, revset
12 import scmutil, util, extensions, hook, error, revset
13 import match as matchmod
13 import match as matchmod
14 import merge as mergemod
14 import merge as mergemod
15 import tags as tagsmod
15 import tags as tagsmod
16 from lock import release
16 from lock import release
17 import weakref, errno, os, time, inspect
17 import weakref, errno, os, time, inspect
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19 filecache = scmutil.filecache
19 filecache = scmutil.filecache
20
20
21 class storecache(filecache):
21 class storecache(filecache):
22 """filecache for files in the store"""
22 """filecache for files in the store"""
23 def join(self, obj, fname):
23 def join(self, obj, fname):
24 return obj.sjoin(fname)
24 return obj.sjoin(fname)
25
25
26 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
26 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
27 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
27 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
28
28
29 class localpeer(peer.peerrepository):
29 class localpeer(peer.peerrepository):
30 '''peer for a local repo; reflects only the most recent API'''
30 '''peer for a local repo; reflects only the most recent API'''
31
31
32 def __init__(self, repo, caps=MODERNCAPS):
32 def __init__(self, repo, caps=MODERNCAPS):
33 peer.peerrepository.__init__(self)
33 peer.peerrepository.__init__(self)
34 self._repo = repo
34 self._repo = repo
35 self.ui = repo.ui
35 self.ui = repo.ui
36 self._caps = repo._restrictcapabilities(caps)
36 self._caps = repo._restrictcapabilities(caps)
37 self.requirements = repo.requirements
37 self.requirements = repo.requirements
38 self.supportedformats = repo.supportedformats
38 self.supportedformats = repo.supportedformats
39
39
40 def close(self):
40 def close(self):
41 self._repo.close()
41 self._repo.close()
42
42
43 def _capabilities(self):
43 def _capabilities(self):
44 return self._caps
44 return self._caps
45
45
46 def local(self):
46 def local(self):
47 return self._repo
47 return self._repo
48
48
49 def canpush(self):
49 def canpush(self):
50 return True
50 return True
51
51
52 def url(self):
52 def url(self):
53 return self._repo.url()
53 return self._repo.url()
54
54
55 def lookup(self, key):
55 def lookup(self, key):
56 return self._repo.lookup(key)
56 return self._repo.lookup(key)
57
57
58 def branchmap(self):
58 def branchmap(self):
59 return discovery.visiblebranchmap(self._repo)
59 return discovery.visiblebranchmap(self._repo)
60
60
61 def heads(self):
61 def heads(self):
62 return discovery.visibleheads(self._repo)
62 return discovery.visibleheads(self._repo)
63
63
64 def known(self, nodes):
64 def known(self, nodes):
65 return self._repo.known(nodes)
65 return self._repo.known(nodes)
66
66
67 def getbundle(self, source, heads=None, common=None):
67 def getbundle(self, source, heads=None, common=None):
68 return self._repo.getbundle(source, heads=heads, common=common)
68 return self._repo.getbundle(source, heads=heads, common=common)
69
69
70 # TODO We might want to move the next two calls into legacypeer and add
70 # TODO We might want to move the next two calls into legacypeer and add
71 # unbundle instead.
71 # unbundle instead.
72
72
73 def lock(self):
73 def lock(self):
74 return self._repo.lock()
74 return self._repo.lock()
75
75
76 def addchangegroup(self, cg, source, url):
76 def addchangegroup(self, cg, source, url):
77 return self._repo.addchangegroup(cg, source, url)
77 return self._repo.addchangegroup(cg, source, url)
78
78
79 def pushkey(self, namespace, key, old, new):
79 def pushkey(self, namespace, key, old, new):
80 return self._repo.pushkey(namespace, key, old, new)
80 return self._repo.pushkey(namespace, key, old, new)
81
81
82 def listkeys(self, namespace):
82 def listkeys(self, namespace):
83 return self._repo.listkeys(namespace)
83 return self._repo.listkeys(namespace)
84
84
85 def debugwireargs(self, one, two, three=None, four=None, five=None):
85 def debugwireargs(self, one, two, three=None, four=None, five=None):
86 '''used to test argument passing over the wire'''
86 '''used to test argument passing over the wire'''
87 return "%s %s %s %s %s" % (one, two, three, four, five)
87 return "%s %s %s %s %s" % (one, two, three, four, five)
88
88
89 class locallegacypeer(localpeer):
89 class locallegacypeer(localpeer):
90 '''peer extension which implements legacy methods too; used for tests with
90 '''peer extension which implements legacy methods too; used for tests with
91 restricted capabilities'''
91 restricted capabilities'''
92
92
93 def __init__(self, repo):
93 def __init__(self, repo):
94 localpeer.__init__(self, repo, caps=LEGACYCAPS)
94 localpeer.__init__(self, repo, caps=LEGACYCAPS)
95
95
96 def branches(self, nodes):
96 def branches(self, nodes):
97 return self._repo.branches(nodes)
97 return self._repo.branches(nodes)
98
98
99 def between(self, pairs):
99 def between(self, pairs):
100 return self._repo.between(pairs)
100 return self._repo.between(pairs)
101
101
102 def changegroup(self, basenodes, source):
102 def changegroup(self, basenodes, source):
103 return self._repo.changegroup(basenodes, source)
103 return self._repo.changegroup(basenodes, source)
104
104
105 def changegroupsubset(self, bases, heads, source):
105 def changegroupsubset(self, bases, heads, source):
106 return self._repo.changegroupsubset(bases, heads, source)
106 return self._repo.changegroupsubset(bases, heads, source)
107
107
108 class localrepository(object):
108 class localrepository(object):
109
109
110 supportedformats = set(('revlogv1', 'generaldelta'))
110 supportedformats = set(('revlogv1', 'generaldelta'))
111 supported = supportedformats | set(('store', 'fncache', 'shared',
111 supported = supportedformats | set(('store', 'fncache', 'shared',
112 'dotencode'))
112 'dotencode'))
113 openerreqs = set(('revlogv1', 'generaldelta'))
113 openerreqs = set(('revlogv1', 'generaldelta'))
114 requirements = ['revlogv1']
114 requirements = ['revlogv1']
115
115
116 def _baserequirements(self, create):
116 def _baserequirements(self, create):
117 return self.requirements[:]
117 return self.requirements[:]
118
118
119 def __init__(self, baseui, path=None, create=False):
119 def __init__(self, baseui, path=None, create=False):
120 self.wvfs = scmutil.vfs(path, expand=True)
120 self.wvfs = scmutil.vfs(path, expand=True)
121 self.wopener = self.wvfs
121 self.wopener = self.wvfs
122 self.root = self.wvfs.base
122 self.root = self.wvfs.base
123 self.path = self.wvfs.join(".hg")
123 self.path = self.wvfs.join(".hg")
124 self.origroot = path
124 self.origroot = path
125 self.auditor = scmutil.pathauditor(self.root, self._checknested)
125 self.auditor = scmutil.pathauditor(self.root, self._checknested)
126 self.vfs = scmutil.vfs(self.path)
126 self.vfs = scmutil.vfs(self.path)
127 self.opener = self.vfs
127 self.opener = self.vfs
128 self.baseui = baseui
128 self.baseui = baseui
129 self.ui = baseui.copy()
129 self.ui = baseui.copy()
130 # A list of callback to shape the phase if no data were found.
130 # A list of callback to shape the phase if no data were found.
131 # Callback are in the form: func(repo, roots) --> processed root.
131 # Callback are in the form: func(repo, roots) --> processed root.
132 # This list it to be filled by extension during repo setup
132 # This list it to be filled by extension during repo setup
133 self._phasedefaults = []
133 self._phasedefaults = []
134 try:
134 try:
135 self.ui.readconfig(self.join("hgrc"), self.root)
135 self.ui.readconfig(self.join("hgrc"), self.root)
136 extensions.loadall(self.ui)
136 extensions.loadall(self.ui)
137 except IOError:
137 except IOError:
138 pass
138 pass
139
139
140 if not self.vfs.isdir():
140 if not self.vfs.isdir():
141 if create:
141 if create:
142 if not self.wvfs.exists():
142 if not self.wvfs.exists():
143 self.wvfs.makedirs()
143 self.wvfs.makedirs()
144 self.vfs.makedir(notindexed=True)
144 self.vfs.makedir(notindexed=True)
145 requirements = self._baserequirements(create)
145 requirements = self._baserequirements(create)
146 if self.ui.configbool('format', 'usestore', True):
146 if self.ui.configbool('format', 'usestore', True):
147 self.vfs.mkdir("store")
147 self.vfs.mkdir("store")
148 requirements.append("store")
148 requirements.append("store")
149 if self.ui.configbool('format', 'usefncache', True):
149 if self.ui.configbool('format', 'usefncache', True):
150 requirements.append("fncache")
150 requirements.append("fncache")
151 if self.ui.configbool('format', 'dotencode', True):
151 if self.ui.configbool('format', 'dotencode', True):
152 requirements.append('dotencode')
152 requirements.append('dotencode')
153 # create an invalid changelog
153 # create an invalid changelog
154 self.vfs.append(
154 self.vfs.append(
155 "00changelog.i",
155 "00changelog.i",
156 '\0\0\0\2' # represents revlogv2
156 '\0\0\0\2' # represents revlogv2
157 ' dummy changelog to prevent using the old repo layout'
157 ' dummy changelog to prevent using the old repo layout'
158 )
158 )
159 if self.ui.configbool('format', 'generaldelta', False):
159 if self.ui.configbool('format', 'generaldelta', False):
160 requirements.append("generaldelta")
160 requirements.append("generaldelta")
161 requirements = set(requirements)
161 requirements = set(requirements)
162 else:
162 else:
163 raise error.RepoError(_("repository %s not found") % path)
163 raise error.RepoError(_("repository %s not found") % path)
164 elif create:
164 elif create:
165 raise error.RepoError(_("repository %s already exists") % path)
165 raise error.RepoError(_("repository %s already exists") % path)
166 else:
166 else:
167 try:
167 try:
168 requirements = scmutil.readrequires(self.vfs, self.supported)
168 requirements = scmutil.readrequires(self.vfs, self.supported)
169 except IOError, inst:
169 except IOError, inst:
170 if inst.errno != errno.ENOENT:
170 if inst.errno != errno.ENOENT:
171 raise
171 raise
172 requirements = set()
172 requirements = set()
173
173
174 self.sharedpath = self.path
174 self.sharedpath = self.path
175 try:
175 try:
176 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
176 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
177 if not os.path.exists(s):
177 if not os.path.exists(s):
178 raise error.RepoError(
178 raise error.RepoError(
179 _('.hg/sharedpath points to nonexistent directory %s') % s)
179 _('.hg/sharedpath points to nonexistent directory %s') % s)
180 self.sharedpath = s
180 self.sharedpath = s
181 except IOError, inst:
181 except IOError, inst:
182 if inst.errno != errno.ENOENT:
182 if inst.errno != errno.ENOENT:
183 raise
183 raise
184
184
185 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
185 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
186 self.spath = self.store.path
186 self.spath = self.store.path
187 self.svfs = self.store.vfs
187 self.svfs = self.store.vfs
188 self.sopener = self.svfs
188 self.sopener = self.svfs
189 self.sjoin = self.store.join
189 self.sjoin = self.store.join
190 self.vfs.createmode = self.store.createmode
190 self.vfs.createmode = self.store.createmode
191 self._applyrequirements(requirements)
191 self._applyrequirements(requirements)
192 if create:
192 if create:
193 self._writerequirements()
193 self._writerequirements()
194
194
195
195
196 self._branchcache = None
196 self._branchcache = None
197 self._branchcachetip = None
197 self._branchcachetip = None
198 self.filterpats = {}
198 self.filterpats = {}
199 self._datafilters = {}
199 self._datafilters = {}
200 self._transref = self._lockref = self._wlockref = None
200 self._transref = self._lockref = self._wlockref = None
201
201
202 # A cache for various files under .hg/ that tracks file changes,
202 # A cache for various files under .hg/ that tracks file changes,
203 # (used by the filecache decorator)
203 # (used by the filecache decorator)
204 #
204 #
205 # Maps a property name to its util.filecacheentry
205 # Maps a property name to its util.filecacheentry
206 self._filecache = {}
206 self._filecache = {}
207
207
208 def close(self):
208 def close(self):
209 pass
209 pass
210
210
211 def _restrictcapabilities(self, caps):
211 def _restrictcapabilities(self, caps):
212 return caps
212 return caps
213
213
214 def _applyrequirements(self, requirements):
214 def _applyrequirements(self, requirements):
215 self.requirements = requirements
215 self.requirements = requirements
216 self.sopener.options = dict((r, 1) for r in requirements
216 self.sopener.options = dict((r, 1) for r in requirements
217 if r in self.openerreqs)
217 if r in self.openerreqs)
218
218
219 def _writerequirements(self):
219 def _writerequirements(self):
220 reqfile = self.opener("requires", "w")
220 reqfile = self.opener("requires", "w")
221 for r in self.requirements:
221 for r in self.requirements:
222 reqfile.write("%s\n" % r)
222 reqfile.write("%s\n" % r)
223 reqfile.close()
223 reqfile.close()
224
224
225 def _checknested(self, path):
225 def _checknested(self, path):
226 """Determine if path is a legal nested repository."""
226 """Determine if path is a legal nested repository."""
227 if not path.startswith(self.root):
227 if not path.startswith(self.root):
228 return False
228 return False
229 subpath = path[len(self.root) + 1:]
229 subpath = path[len(self.root) + 1:]
230 normsubpath = util.pconvert(subpath)
230 normsubpath = util.pconvert(subpath)
231
231
232 # XXX: Checking against the current working copy is wrong in
232 # XXX: Checking against the current working copy is wrong in
233 # the sense that it can reject things like
233 # the sense that it can reject things like
234 #
234 #
235 # $ hg cat -r 10 sub/x.txt
235 # $ hg cat -r 10 sub/x.txt
236 #
236 #
237 # if sub/ is no longer a subrepository in the working copy
237 # if sub/ is no longer a subrepository in the working copy
238 # parent revision.
238 # parent revision.
239 #
239 #
240 # However, it can of course also allow things that would have
240 # However, it can of course also allow things that would have
241 # been rejected before, such as the above cat command if sub/
241 # been rejected before, such as the above cat command if sub/
242 # is a subrepository now, but was a normal directory before.
242 # is a subrepository now, but was a normal directory before.
243 # The old path auditor would have rejected by mistake since it
243 # The old path auditor would have rejected by mistake since it
244 # panics when it sees sub/.hg/.
244 # panics when it sees sub/.hg/.
245 #
245 #
246 # All in all, checking against the working copy seems sensible
246 # All in all, checking against the working copy seems sensible
247 # since we want to prevent access to nested repositories on
247 # since we want to prevent access to nested repositories on
248 # the filesystem *now*.
248 # the filesystem *now*.
249 ctx = self[None]
249 ctx = self[None]
250 parts = util.splitpath(subpath)
250 parts = util.splitpath(subpath)
251 while parts:
251 while parts:
252 prefix = '/'.join(parts)
252 prefix = '/'.join(parts)
253 if prefix in ctx.substate:
253 if prefix in ctx.substate:
254 if prefix == normsubpath:
254 if prefix == normsubpath:
255 return True
255 return True
256 else:
256 else:
257 sub = ctx.sub(prefix)
257 sub = ctx.sub(prefix)
258 return sub.checknested(subpath[len(prefix) + 1:])
258 return sub.checknested(subpath[len(prefix) + 1:])
259 else:
259 else:
260 parts.pop()
260 parts.pop()
261 return False
261 return False
262
262
263 def peer(self):
263 def peer(self):
264 return localpeer(self) # not cached to avoid reference cycle
264 return localpeer(self) # not cached to avoid reference cycle
265
265
266 @filecache('bookmarks')
266 @filecache('bookmarks')
267 def _bookmarks(self):
267 def _bookmarks(self):
268 return bookmarks.read(self)
268 return bookmarks.read(self)
269
269
270 @filecache('bookmarks.current')
270 @filecache('bookmarks.current')
271 def _bookmarkcurrent(self):
271 def _bookmarkcurrent(self):
272 return bookmarks.readcurrent(self)
272 return bookmarks.readcurrent(self)
273
273
274 def _writebookmarks(self, marks):
274 def _writebookmarks(self, marks):
275 bookmarks.write(self)
275 bookmarks.write(self)
276
276
277 def bookmarkheads(self, bookmark):
277 def bookmarkheads(self, bookmark):
278 name = bookmark.split('@', 1)[0]
278 name = bookmark.split('@', 1)[0]
279 heads = []
279 heads = []
280 for mark, n in self._bookmarks.iteritems():
280 for mark, n in self._bookmarks.iteritems():
281 if mark.split('@', 1)[0] == name:
281 if mark.split('@', 1)[0] == name:
282 heads.append(n)
282 heads.append(n)
283 return heads
283 return heads
284
284
    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @propertycache
    def hiddenrevs(self):
        """hiddenrevs: revs that should be hidden by commands and tools

        This set is carried on the repo to ease initialization and lazy
        loading; it'll probably move back to changelog for efficiency and
        consistency reasons.

        Note that hiddenrevs needs invalidation when
        - a new changeset is added (possibly unstable above extinct ones)
        - a new obsolete marker is added (possibly a new extinct changeset)

        Hidden changesets cannot have non-hidden descendants.
        """
        hidden = set()
        if self.obsstore:
            ### hide extinct changesets that are not accessible by any means
            hiddenquery = 'extinct() - ::(. + bookmark())'
            hidden.update(self.revs(hiddenquery))
        return hidden

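    # The hidden set above is plain revset syntax; the same revisions could
    # be inspected from the command line (illustrative sketch, assuming the
    # obsolete feature is enabled):
    #
    #   hg log --hidden -r 'extinct() - ::(. + bookmark())'
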
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

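    # revset.formatspec quotes its arguments, so callers can interpolate
    # user-supplied values safely instead of building revset strings by
    # hand. Hypothetical caller sketch ('branchname' is an assumed local):
    #
    #   for ctx in repo.set('branch(%s) and not merge()', branchname):
    #       repo.ui.write('%d:%s\n' % (ctx.rev(), ctx))
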
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may
        be a string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

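    # Hypothetical usage sketch: tagging the working directory's parent,
    # once locally (stored in .hg/localtags) and once globally (creates a
    # commit touching .hgtags):
    #
    #   node = repo['.'].node()
    #   repo.tag('snapshot', node, 'Added tag snapshot', True, None, None)
    #   repo.tag(['v1.0'], node, 'Added tag v1.0', False, None, None)
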
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

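    # tags() and tagtype() stay consistent with each other; a hypothetical
    # caller separating local from global tags might look like:
    #
    #   for t, n in repo.tags().iteritems():
    #       kind = repo.tagtype(t) or 'unknown'
    #       repo.ui.write('%s %s (%s)\n' % (hex(n), t, kind))
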
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        if self.changelog.filteredrevs:
            # some changesets are excluded, so we can't use the cache
            branchmap = {}
            self._updatebranchcache(branchmap, (self[r] for r in self))
            return branchmap
        else:
            self.updatebranchcache()
            return self._branchcache

    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if node not in self:
                    raise ValueError('invalidating branch cache because node '
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

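    # The cache written above is line-oriented: the first line records the
    # cache's validity ('<tip hex> <tip rev>') and every other line is one
    # '<head hex> <branch name>' pair. A sketch of .hg/cache/branchheads
    # (placeholder hashes):
    #
    #   <40-hex tip node> <tip rev>
    #   <40-hex head node> default
    #   <40-hex head node> stable
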
    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset
        of the missing heads, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in
        # the branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

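    # Worked example of the pruning in _updatebranchcache: given
    # 1 (branch a) -> 2 (branch b) -> 3 (branch a), rev 1 starts out cached
    # as a head of branch a; once rev 3 arrives in ctxgen, rev 1 is found in
    # ancestors(3) and dropped, leaving rev 3 as the only head of branch a.
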
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

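    # known() backs changeset discovery: a peer sends candidate nodes and
    # gets back one boolean per node. Secret changesets are reported as
    # unknown so they are never exchanged. Illustrative sketch with
    # hypothetical nodes:
    #
    #   repo.known([presentnode, missingnode]) => [True, False]
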
    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

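    # The filter patterns come from the hgrc sections named after the
    # filter: 'encode' runs when reading from the working directory and
    # 'decode' when writing to it. An illustrative configuration sketch
    # (the commands are external programs to pipe file data through, not
    # built-ins):
    #
    #   [encode]
    #   **.txt = dos2unix
    #   [decode]
    #   **.txt = unix2dos
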
    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

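    # Hypothetical usage sketch of the transaction object, following the
    # close/release convention used by Mercurial callers:
    #
    #   tr = repo.transaction('my-operation')
    #   try:
    #       # ... write to revlogs and other journaled files ...
    #       tr.close()      # commit the transaction
    #   finally:
    #       tr.release()    # no-op after close(); rolls back otherwise
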
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

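    # The 'undo.desc' file parsed above is the renamed 'journal.desc'
    # written by _writejournal() as "%d\n%s\n": the pre-transaction
    # changelog length, then the operation name, e.g.:
    #
    #   1500
    #   commit
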
    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

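    # Lock-ordering sketch: callers needing both locks must take wlock()
    # before lock() (as rollback() above does) and release in reverse
    # order to avoid deadlocks:
    #
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       ...
    #   finally:
    #       release(lock, wlock)
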
1155 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1155 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1156 """
1156 """
1157 commit an individual file as part of a larger transaction
1157 commit an individual file as part of a larger transaction
1158 """
1158 """
1159
1159
1160 fname = fctx.path()
        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

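    # Illustrative sketch (added for exposition, not part of the original
    # code): after _filecommit() records a rename such as the foo -> bar
    # example above, the filelog revision for 'bar' carries copy metadata
    # instead of a real first parent.  Assuming a hypothetical repo object
    # with such a rename already committed:
    #
    #     fctx = repo['tip']['bar']
    #     fctx.renamed()   # -> ('foo', <filenode of foo>), or False
    #     # the stored revision metadata looks roughly like:
    #     #   {'copy': 'foo', 'copyrev': '<40-digit hex filenode>'}
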
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

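    # Usage sketch (hypothetical, not from the original source): a plain
    # working-directory commit goes through commit() above, which in turn
    # calls commitctx().  Assuming an existing repository at 'path':
    #
    #     from mercurial import hg, ui as uimod
    #     repo = hg.repository(uimod.ui(), 'path')
    #     node = repo.commit(text='fix frobnicator', user='alice <a@b.c>')
    #     if node is None:
    #         pass  # nothing changed, no commit was created
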
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit to its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter the phase of the
                # parent changesets; if a parent has a higher phase, the
                # resulting phase will be compliant anyway.
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

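    # Sketch (an assumption for illustration, not in the original file):
    # commitctx() also accepts in-memory contexts, so a commit can be
    # created without touching the working directory.  Roughly:
    #
    #     from mercurial import context
    #     def getfile(repo, memctx, path):
    #         return context.memfilectx(path, 'new contents\n')
    #     mctx = context.memctx(repo, (repo['tip'].node(), None),
    #                           'automated commit', ['afile'], getfile,
    #                           user='bot')
    #     node = repo.commitctx(mctx)
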
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheadcache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes.
        This will enable the code to update the branchheads cache,
        rather than having future code decide it's invalid and
        regenerate it from scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it.  Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

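    # Caller sketch (hypothetical): strip-like code that knows the new
    # candidate heads can pass them in so the branch cache is patched
    # rather than rebuilt:
    #
    #     repo.destroyed(newheadnodes=set([candidatehead]))
    #
    # while rollback, which has no such knowledge, just calls:
    #
    #     repo.destroyed()
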
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

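    # Usage sketch (assumed names, for illustration only): list every
    # tracked .py file in the working directory or in a given changeset:
    #
    #     from mercurial import match as matchmod
    #     m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #     for f in repo.walk(m):              # working directory
    #         print f
    #     for f in repo.walk(m, node='tip'):  # a specific changeset
    #         print f
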
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

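    # Usage sketch (hypothetical): status() returns a 7-tuple of sorted
    # file lists, in this order:
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(unknown=True, ignored=True, clean=True)
    #
    # Comparing two revisions instead of the working directory leaves the
    # deleted/unknown/ignored lists empty ('1.0' is an assumed tag):
    #
    #     st = repo.status(node1='1.0', node2='tip')
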
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

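    # Usage sketch (assumed repo and branch name): the topological heads
    # of the 'stable' branch, newest first, including closed heads:
    #
    #     for h in repo.branchheads('stable', closed=True):
    #         print repo[h].rev(), hex(h)
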
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

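    # Worked example (added for exposition): between() walks the
    # first-parent chain from 'top' towards 'bottom' and samples it at
    # exponentially growing gaps.  With the chain top = a0, a1, a2, ...
    # it records a1, a2, a4, a8, ... (steps 1, 2, 4, 8, ...), since 'f'
    # doubles each time 'i' catches up to it.  The legacy wire-protocol
    # discovery uses these samples to narrow down the common subset
    # without transferring the whole chain.
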
    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
                if tr is not None:
                    tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

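    # Usage sketch (hypothetical peer and URL): pull everything from a
    # remote and read the result code returned by addchangegroup():
    #
    #     from mercurial import hg
    #     other = hg.peer(repo.ui, {}, 'http://example.com/repo')
    #     res = repo.pull(other)
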
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, self, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete
                        # then, save the iteration
                        if self.obsstore:
                            # these messages are defined here to stay within
                            # the 80-char limit
                            mso = _("push includes an obsolete changeset: %s!")
                            msu = _("push includes an unstable changeset: %s!")
                            # If there is at least one obsolete or unstable
                            # changeset in missing, at least one of the
                            # missing heads will be obsolete or unstable,
                            # so checking heads only is ok
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    raise util.Abort(_(mso) % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(_(msu) % ctx)
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # the push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # the push of everything failed, synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads,
                                                     remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote that are
                    # public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is,
                    # XXX but that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if bookmarks.validdest(self, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

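    # Usage sketch (assumed peer object 'other'): push a single head and
    # interpret the return values documented in the docstring above:
    #
    #     ret = repo.push(other, revs=[repo['tip'].node()])
    #     if ret is None:
    #         pass  # nothing to push
    #     elif ret == 0:
    #         pass  # HTTP-level error
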
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

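    # Sketch (illustrative; 'base' and 'head' are assumed revision names):
    # a changegroup covering everything that descends from 'base' and is
    # an ancestor of 'head':
    #
    #     cg = repo.changegroupsubset([repo['base'].node()],
    #                                 [repo['head'].node()], 'push')
    #     data = cg.read(4096)  # chunkbuffer: successive changegroup chunks
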
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

2056 def getbundle(self, source, heads=None, common=None):
2056 def getbundle(self, source, heads=None, common=None):
2057 """Like changegroupsubset, but returns the set difference between the
2057 """Like changegroupsubset, but returns the set difference between the
2058 ancestors of heads and the ancestors common.
2058 ancestors of heads and the ancestors common.
2059
2059
2060 If heads is None, use the local heads. If common is None, use [nullid].
2060 If heads is None, use the local heads. If common is None, use [nullid].
2061
2061
2062 The nodes in common might not all be known locally due to the way the
2062 The nodes in common might not all be known locally due to the way the
2063 current discovery protocol works.
2063 current discovery protocol works.
2064 """
2064 """
2065 cl = self.changelog
2065 cl = self.changelog
2066 if common:
2066 if common:
2067 nm = cl.nodemap
2067 nm = cl.nodemap
2068 common = [n for n in common if n in nm]
2068 common = [n for n in common if n in nm]
2069 else:
2069 else:
2070 common = [nullid]
2070 common = [nullid]
2071 if not heads:
2071 if not heads:
2072 heads = cl.heads()
2072 heads = cl.heads()
2073 return self.getlocalbundle(source,
2073 return self.getlocalbundle(source,
2074 discovery.outgoing(cl, common, heads))
2074 discovery.outgoing(cl, common, heads))
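
    # A minimal worked example of the set arithmetic above: given a linear
    # history 0..4 where the remote is known to have revision 2, calling
    # getbundle with heads=[node(4)] and common=[node(2)] yields a bundle
    # containing exactly revisions 3 and 4, i.e.
    # ancestors(heads) - ancestors(common).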

    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
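
        # Semantics of the bundle.reorder knob parsed above: 'auto' becomes
        # None, which leaves the reordering decision to each revlog; any
        # other value is coerced to a boolean that forces reordering on or
        # off for every revlog.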

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for an empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate that incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            obsolete.clearobscaches(self)

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret.
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                self.updatebranchcache()
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save the remote branchmap. We will use it later
            # to speed up branchcache creation.
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
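            # Wire format consumed below: after the status line the server
            # sends "<total_files> <total_bytes>" on one line, then for each
            # file a "<name>\0<size>" header line followed by exactly <size>
            # bytes of raw store data.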
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                self.branchcache = rbranchmap
                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    self._writebranchcache(self.branchcache,
                                           self[rtiprev].node(), rtiprev)
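            # The on-disk branch cache is stamped with the (node, rev) it was
            # computed at; the highest streamed branch head serves as that
            # tip so later readers can tell whether the cache is current.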
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
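
        # Negotiation order, as implemented below: honor an explicit stream
        # request or the server's 'stream-preferred' hint, try the plain
        # 'stream' capability (revlogv1 only), fall back to 'streamreqs'
        # (which lists the remote's format requirements), and finally pull.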

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,124 +1,125 @@
1 $ "$TESTDIR/hghave" serve || exit 80
1 $ "$TESTDIR/hghave" serve || exit 80
2
2
3 $ hg init a
3 $ hg init a
4 $ cd a
4 $ cd a
5 $ echo a > a
5 $ echo a > a
6 $ hg ci -Ama -d '1123456789 0'
6 $ hg ci -Ama -d '1123456789 0'
7 adding a
7 adding a
8 $ hg --config server.uncompressed=True serve -p $HGPORT -d --pid-file=hg.pid
8 $ hg --config server.uncompressed=True serve -p $HGPORT -d --pid-file=hg.pid
9 $ cat hg.pid >> $DAEMON_PIDS
9 $ cat hg.pid >> $DAEMON_PIDS
10 $ cd ..
10 $ cd ..
11 $ "$TESTDIR/tinyproxy.py" $HGPORT1 localhost >proxy.log 2>&1 </dev/null &
11 $ "$TESTDIR/tinyproxy.py" $HGPORT1 localhost >proxy.log 2>&1 </dev/null &
12 $ while [ ! -f proxy.pid ]; do sleep 0; done
12 $ while [ ! -f proxy.pid ]; do sleep 0; done
13 $ cat proxy.pid >> $DAEMON_PIDS
13 $ cat proxy.pid >> $DAEMON_PIDS
14
14
15 url for proxy, stream
15 url for proxy, stream
16
16
17 $ http_proxy=http://localhost:$HGPORT1/ hg --config http_proxy.always=True clone --uncompressed http://localhost:$HGPORT/ b
17 $ http_proxy=http://localhost:$HGPORT1/ hg --config http_proxy.always=True clone --uncompressed http://localhost:$HGPORT/ b
18 streaming all changes
18 streaming all changes
19 3 files to transfer, 303 bytes of data
19 3 files to transfer, 303 bytes of data
20 transferred * bytes in * seconds (*/sec) (glob)
20 transferred * bytes in * seconds (*/sec) (glob)
21 updating to branch default
21 updating to branch default
22 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
22 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 $ cd b
23 $ cd b
24 $ hg verify
24 $ hg verify
25 checking changesets
25 checking changesets
26 checking manifests
26 checking manifests
27 crosschecking files in changesets and manifests
27 crosschecking files in changesets and manifests
28 checking files
28 checking files
29 1 files, 1 changesets, 1 total revisions
29 1 files, 1 changesets, 1 total revisions
30 $ cd ..
30 $ cd ..
31
31
32 url for proxy, pull
32 url for proxy, pull
33
33
34 $ http_proxy=http://localhost:$HGPORT1/ hg --config http_proxy.always=True clone http://localhost:$HGPORT/ b-pull
34 $ http_proxy=http://localhost:$HGPORT1/ hg --config http_proxy.always=True clone http://localhost:$HGPORT/ b-pull
35 requesting all changes
35 requesting all changes
36 adding changesets
36 adding changesets
37 adding manifests
37 adding manifests
38 adding file changes
38 adding file changes
39 added 1 changesets with 1 changes to 1 files
39 added 1 changesets with 1 changes to 1 files
40 updating to branch default
40 updating to branch default
41 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
41 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
42 $ cd b-pull
42 $ cd b-pull
43 $ hg verify
43 $ hg verify
44 checking changesets
44 checking changesets
45 checking manifests
45 checking manifests
46 crosschecking files in changesets and manifests
46 crosschecking files in changesets and manifests
47 checking files
47 checking files
48 1 files, 1 changesets, 1 total revisions
48 1 files, 1 changesets, 1 total revisions
49 $ cd ..
49 $ cd ..
50
50
51 host:port for proxy
51 host:port for proxy
52
52
53 $ http_proxy=localhost:$HGPORT1 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ c
53 $ http_proxy=localhost:$HGPORT1 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ c
54 requesting all changes
54 requesting all changes
55 adding changesets
55 adding changesets
56 adding manifests
56 adding manifests
57 adding file changes
57 adding file changes
58 added 1 changesets with 1 changes to 1 files
58 added 1 changesets with 1 changes to 1 files
59 updating to branch default
59 updating to branch default
60 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
60 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
61
61
62 proxy url with user name and password
62 proxy url with user name and password
63
63
64 $ http_proxy=http://user:passwd@localhost:$HGPORT1 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ d
64 $ http_proxy=http://user:passwd@localhost:$HGPORT1 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ d
65 requesting all changes
65 requesting all changes
66 adding changesets
66 adding changesets
67 adding manifests
67 adding manifests
68 adding file changes
68 adding file changes
69 added 1 changesets with 1 changes to 1 files
69 added 1 changesets with 1 changes to 1 files
70 updating to branch default
70 updating to branch default
71 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
71 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
72
72
73 url with user name and password
73 url with user name and password
74
74
75 $ http_proxy=http://user:passwd@localhost:$HGPORT1 hg clone --config http_proxy.always=True http://user:passwd@localhost:$HGPORT/ e
75 $ http_proxy=http://user:passwd@localhost:$HGPORT1 hg clone --config http_proxy.always=True http://user:passwd@localhost:$HGPORT/ e
76 requesting all changes
76 requesting all changes
77 adding changesets
77 adding changesets
78 adding manifests
78 adding manifests
79 adding file changes
79 adding file changes
80 added 1 changesets with 1 changes to 1 files
80 added 1 changesets with 1 changes to 1 files
81 updating to branch default
81 updating to branch default
82 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
82 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
83
83
84 bad host:port for proxy
84 bad host:port for proxy
85
85
86 $ http_proxy=localhost:$HGPORT2 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ f
86 $ http_proxy=localhost:$HGPORT2 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ f
87 abort: error: Connection refused
87 abort: error: Connection refused
88 [255]
88 [255]
89
89
90 do not use the proxy if it is in the no list
90 do not use the proxy if it is in the no list
91
91
92 $ http_proxy=localhost:$HGPORT1 hg clone --config http_proxy.no=localhost http://localhost:$HGPORT/ g
92 $ http_proxy=localhost:$HGPORT1 hg clone --config http_proxy.no=localhost http://localhost:$HGPORT/ g
93 requesting all changes
93 requesting all changes
94 adding changesets
94 adding changesets
95 adding manifests
95 adding manifests
96 adding file changes
96 adding file changes
97 added 1 changesets with 1 changes to 1 files
97 added 1 changesets with 1 changes to 1 files
98 updating to branch default
98 updating to branch default
99 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
99 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
100 $ cat proxy.log
100 $ cat proxy.log
101 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
101 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
102 * - - [*] "GET http://localhost:$HGPORT/?cmd=branchmap HTTP/1.1" - - (glob)
102 * - - [*] "GET http://localhost:$HGPORT/?cmd=stream_out HTTP/1.1" - - (glob)
103 * - - [*] "GET http://localhost:$HGPORT/?cmd=stream_out HTTP/1.1" - - (glob)
103 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
104 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
104 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
105 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
105 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
106 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
106 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
107 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
107 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
108 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
108 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
109 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
109 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
110 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
110 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
111 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
111 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
112 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
112 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
113 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
113 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
114 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
114 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
115 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
115 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
116 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
116 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
117 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
117 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
118 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
118 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
119 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
119 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
120 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
120 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
121 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
121 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
122 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
122 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
123 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
123 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
124 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
124
125
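
The branchmap request near the top of the log comes from the streaming
clone: stream_in fetches the remote branchmap before calling stream_out and
uses it to prime the local branch cache.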