##// END OF EJS Templates
repair: use `first` instead of direct indexing...
Pierre-Yves David -
r22818:d7b11449 default
parent child Browse files
Show More
@@ -1,669 +1,676
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from i18n import _
9 from i18n import _
10 from lock import release
10 from lock import release
11 from node import nullid
11 from node import nullid
12 import localrepo, bundlerepo, unionrepo, httppeer, sshpeer, statichttprepo
12 import localrepo, bundlerepo, unionrepo, httppeer, sshpeer, statichttprepo
13 import bookmarks, lock, util, extensions, error, node, scmutil, phases, url
13 import bookmarks, lock, util, extensions, error, node, scmutil, phases, url
14 import cmdutil, discovery, repoview, exchange
14 import cmdutil, discovery, repoview, exchange
15 import merge as mergemod
15 import merge as mergemod
16 import verify as verifymod
16 import verify as verifymod
17 import errno, os, shutil
17 import errno, os, shutil
18
18
def _local(path):
    """Return the repo module for a local *path*.

    A plain file is opened as a bundle repository, anything else as a
    regular local repository.
    """
    normed = util.expandpath(util.urllocalpath(path))
    return bundlerepo if os.path.isfile(normed) else localrepo
22
22
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names from ``branches`` into revisions.

    Returns ``(revs, checkout)`` where ``checkout`` is the revision the
    caller should update to (possibly None).
    """
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing to resolve: pass revs through, picking a checkout rev
        allrevs = revs or None
        if util.safehasattr(revs, 'first'):
            # smartset-like object: use first() instead of indexing
            checkout = revs.first()
        else:
            checkout = revs[0] if revs else None
        return allrevs, checkout
    revs = list(revs) if revs else []
    if not peer.capable('branchmap'):
        if branches:
            raise util.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]
    branchmap = peer.branchmap()

    def primary(branch):
        # extend revs with the heads of *branch*; False if unknown
        if branch == '.':
            if not lrepo:
                raise util.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch not in branchmap:
            return False
        revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
        return True

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch and not primary(hashbranch):
        revs.append(hashbranch)
    return revs, revs[0]
54
61
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    u = util.url(path)
    branch = None
    if u.fragment:
        # the fragment names a branch; strip it from the URL itself
        branch, u.fragment = u.fragment, None
    return str(u), (branch, branches or [])
64
71
# URL scheme -> module (or factory callable) handling that scheme
schemes = {
    'bundle': bundlerepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
    'union': unionrepo,
}
74
81
def _peerlookup(path):
    """Return the scheme handler for *path*: a module, or an instance
    when the handler is callable."""
    scheme = util.url(path).scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # plain modules are not callable; hand back the module itself
        return thing
83
90
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, str):
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # scheme handler has no islocal() -> treat as remote
        return False
92
99
def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path)
    return util.posixfile(pathurl.localpath(), 'rb')
100
107
# functions called as fn(ui, peer) to finish setting up a wire peer
wirepeersetupfuncs = []
103
110
def _peerorrepo(ui, path, create=False):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create)
    # prefer the repository's own ui when it carries one
    ui = getattr(obj, "ui", ui)
    # give every loaded extension a chance to decorate the object
    for name, module in extensions.extensions(ui):
        hook = getattr(module, 'reposetup', None)
        if hook:
            hook(ui, obj)
    if not obj.local():
        for setupfunc in wirepeersetupfuncs:
            setupfunc(ui, obj)
    return obj
116
123
def repository(ui, path='', create=False):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create)
    repo = peer.local()
    if not repo:
        raise util.Abort(_("repository '%s' is not local") %
                         (path or peer.url()))
    # hide obsolete/secret-filtered changesets from normal operations
    return repo.filtered('visible')
125
132
def peer(uiorrepo, opts, path, create=False):
    '''return a repository peer for the specified path'''
    return _peerorrepo(remoteui(uiorrepo, opts), path, create).peer()
130
137
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest('foo')
    'foo'
    >>> defaultdest('/foo/bar')
    'bar'
    >>> defaultdest('/')
    ''
    >>> defaultdest('')
    ''
    >>> defaultdest('http://example.org/')
    ''
    >>> defaultdest('http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    # last path component, ignoring any trailing slash
    return os.path.basename(os.path.normpath(path))
151
158
152 def share(ui, source, dest=None, update=True):
159 def share(ui, source, dest=None, update=True):
153 '''create a shared repository'''
160 '''create a shared repository'''
154
161
155 if not islocal(source):
162 if not islocal(source):
156 raise util.Abort(_('can only share local repositories'))
163 raise util.Abort(_('can only share local repositories'))
157
164
158 if not dest:
165 if not dest:
159 dest = defaultdest(source)
166 dest = defaultdest(source)
160 else:
167 else:
161 dest = ui.expandpath(dest)
168 dest = ui.expandpath(dest)
162
169
163 if isinstance(source, str):
170 if isinstance(source, str):
164 origsource = ui.expandpath(source)
171 origsource = ui.expandpath(source)
165 source, branches = parseurl(origsource)
172 source, branches = parseurl(origsource)
166 srcrepo = repository(ui, source)
173 srcrepo = repository(ui, source)
167 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
174 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
168 else:
175 else:
169 srcrepo = source.local()
176 srcrepo = source.local()
170 origsource = source = srcrepo.url()
177 origsource = source = srcrepo.url()
171 checkout = None
178 checkout = None
172
179
173 sharedpath = srcrepo.sharedpath # if our source is already sharing
180 sharedpath = srcrepo.sharedpath # if our source is already sharing
174
181
175 destwvfs = scmutil.vfs(dest, realpath=True)
182 destwvfs = scmutil.vfs(dest, realpath=True)
176 destvfs = scmutil.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
183 destvfs = scmutil.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
177
184
178 if destvfs.lexists():
185 if destvfs.lexists():
179 raise util.Abort(_('destination already exists'))
186 raise util.Abort(_('destination already exists'))
180
187
181 if not destwvfs.isdir():
188 if not destwvfs.isdir():
182 destwvfs.mkdir()
189 destwvfs.mkdir()
183 destvfs.makedir()
190 destvfs.makedir()
184
191
185 requirements = ''
192 requirements = ''
186 try:
193 try:
187 requirements = srcrepo.opener.read('requires')
194 requirements = srcrepo.opener.read('requires')
188 except IOError, inst:
195 except IOError, inst:
189 if inst.errno != errno.ENOENT:
196 if inst.errno != errno.ENOENT:
190 raise
197 raise
191
198
192 requirements += 'shared\n'
199 requirements += 'shared\n'
193 destvfs.write('requires', requirements)
200 destvfs.write('requires', requirements)
194 destvfs.write('sharedpath', sharedpath)
201 destvfs.write('sharedpath', sharedpath)
195
202
196 r = repository(ui, destwvfs.base)
203 r = repository(ui, destwvfs.base)
197
204
198 default = srcrepo.ui.config('paths', 'default')
205 default = srcrepo.ui.config('paths', 'default')
199 if default:
206 if default:
200 fp = r.opener("hgrc", "w", text=True)
207 fp = r.opener("hgrc", "w", text=True)
201 fp.write("[paths]\n")
208 fp.write("[paths]\n")
202 fp.write("default = %s\n" % default)
209 fp.write("default = %s\n" % default)
203 fp.close()
210 fp.close()
204
211
205 if update:
212 if update:
206 r.ui.status(_("updating working directory\n"))
213 r.ui.status(_("updating working directory\n"))
207 if update is not True:
214 if update is not True:
208 checkout = update
215 checkout = update
209 for test in (checkout, 'default', 'tip'):
216 for test in (checkout, 'default', 'tip'):
210 if test is None:
217 if test is None:
211 continue
218 continue
212 try:
219 try:
213 uprev = r.lookup(test)
220 uprev = r.lookup(test)
214 break
221 break
215 except error.RepoLookupError:
222 except error.RepoLookupError:
216 continue
223 continue
217 _update(r, uprev)
224 _update(r, uprev)
218
225
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        copied = 0
        srcpublishing = srcrepo.ui.configbool('phases', 'publish', True)
        srcvfs = scmutil.vfs(srcrepo.sharedpath)
        dstvfs = scmutil.vfs(destpath)
        for f in srcrepo.store.copylist():
            # skip phaseroots when the source publishes everything
            if srcpublishing and f.endswith('phaseroots'):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if not srcvfs.exists(f):
                continue
            if f.endswith('data'):
                # 'dstbase' may be empty (e.g. revlog format 0)
                lockfile = os.path.join(dstbase, "lock")
                # lock to avoid premature writing to the target
                destlock = lock.lock(dstvfs, lockfile)
            hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                         hardlink)
            copied += n
        if hardlink:
            ui.debug("linked %d files\n" % copied)
        else:
            ui.debug("copied %d files\n" % copied)
        return destlock
    except: # re-raises
        release(destlock)
        raise
254
261
255 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
262 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
256 update=True, stream=False, branch=None):
263 update=True, stream=False, branch=None):
257 """Make a copy of an existing repository.
264 """Make a copy of an existing repository.
258
265
259 Create a copy of an existing repository in a new directory. The
266 Create a copy of an existing repository in a new directory. The
260 source and destination are URLs, as passed to the repository
267 source and destination are URLs, as passed to the repository
261 function. Returns a pair of repository peers, the source and
268 function. Returns a pair of repository peers, the source and
262 newly created destination.
269 newly created destination.
263
270
264 The location of the source is added to the new repository's
271 The location of the source is added to the new repository's
265 .hg/hgrc file, as the default to be used for future pulls and
272 .hg/hgrc file, as the default to be used for future pulls and
266 pushes.
273 pushes.
267
274
268 If an exception is raised, the partly cloned/updated destination
275 If an exception is raised, the partly cloned/updated destination
269 repository will be deleted.
276 repository will be deleted.
270
277
271 Arguments:
278 Arguments:
272
279
273 source: repository object or URL
280 source: repository object or URL
274
281
275 dest: URL of destination repository to create (defaults to base
282 dest: URL of destination repository to create (defaults to base
276 name of source repository)
283 name of source repository)
277
284
278 pull: always pull from source repository, even in local case
285 pull: always pull from source repository, even in local case
279
286
280 stream: stream raw data uncompressed from repository (fast over
287 stream: stream raw data uncompressed from repository (fast over
281 LAN, slow over WAN)
288 LAN, slow over WAN)
282
289
283 rev: revision to clone up to (implies pull=True)
290 rev: revision to clone up to (implies pull=True)
284
291
285 update: update working directory after clone completes, if
292 update: update working directory after clone completes, if
286 destination is local repository (True means update to default rev,
293 destination is local repository (True means update to default rev,
287 anything else is treated as a revision)
294 anything else is treated as a revision)
288
295
289 branch: branches to clone
296 branch: branches to clone
290 """
297 """
291
298
292 if isinstance(source, str):
299 if isinstance(source, str):
293 origsource = ui.expandpath(source)
300 origsource = ui.expandpath(source)
294 source, branch = parseurl(origsource, branch)
301 source, branch = parseurl(origsource, branch)
295 srcpeer = peer(ui, peeropts, source)
302 srcpeer = peer(ui, peeropts, source)
296 else:
303 else:
297 srcpeer = source.peer() # in case we were called with a localrepo
304 srcpeer = source.peer() # in case we were called with a localrepo
298 branch = (None, branch or [])
305 branch = (None, branch or [])
299 origsource = source = srcpeer.url()
306 origsource = source = srcpeer.url()
300 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
307 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
301
308
302 if dest is None:
309 if dest is None:
303 dest = defaultdest(source)
310 dest = defaultdest(source)
304 if dest:
311 if dest:
305 ui.status(_("destination directory: %s\n") % dest)
312 ui.status(_("destination directory: %s\n") % dest)
306 else:
313 else:
307 dest = ui.expandpath(dest)
314 dest = ui.expandpath(dest)
308
315
309 dest = util.urllocalpath(dest)
316 dest = util.urllocalpath(dest)
310 source = util.urllocalpath(source)
317 source = util.urllocalpath(source)
311
318
312 if not dest:
319 if not dest:
313 raise util.Abort(_("empty destination path is not valid"))
320 raise util.Abort(_("empty destination path is not valid"))
314
321
315 destvfs = scmutil.vfs(dest, expandpath=True)
322 destvfs = scmutil.vfs(dest, expandpath=True)
316 if destvfs.lexists():
323 if destvfs.lexists():
317 if not destvfs.isdir():
324 if not destvfs.isdir():
318 raise util.Abort(_("destination '%s' already exists") % dest)
325 raise util.Abort(_("destination '%s' already exists") % dest)
319 elif destvfs.listdir():
326 elif destvfs.listdir():
320 raise util.Abort(_("destination '%s' is not empty") % dest)
327 raise util.Abort(_("destination '%s' is not empty") % dest)
321
328
322 srclock = destlock = cleandir = None
329 srclock = destlock = cleandir = None
323 srcrepo = srcpeer.local()
330 srcrepo = srcpeer.local()
324 try:
331 try:
325 abspath = origsource
332 abspath = origsource
326 if islocal(origsource):
333 if islocal(origsource):
327 abspath = os.path.abspath(util.urllocalpath(origsource))
334 abspath = os.path.abspath(util.urllocalpath(origsource))
328
335
329 if islocal(dest):
336 if islocal(dest):
330 cleandir = dest
337 cleandir = dest
331
338
332 copy = False
339 copy = False
333 if (srcrepo and srcrepo.cancopy() and islocal(dest)
340 if (srcrepo and srcrepo.cancopy() and islocal(dest)
334 and not phases.hassecret(srcrepo)):
341 and not phases.hassecret(srcrepo)):
335 copy = not pull and not rev
342 copy = not pull and not rev
336
343
337 if copy:
344 if copy:
338 try:
345 try:
339 # we use a lock here because if we race with commit, we
346 # we use a lock here because if we race with commit, we
340 # can end up with extra data in the cloned revlogs that's
347 # can end up with extra data in the cloned revlogs that's
341 # not pointed to by changesets, thus causing verify to
348 # not pointed to by changesets, thus causing verify to
342 # fail
349 # fail
343 srclock = srcrepo.lock(wait=False)
350 srclock = srcrepo.lock(wait=False)
344 except error.LockError:
351 except error.LockError:
345 copy = False
352 copy = False
346
353
347 if copy:
354 if copy:
348 srcrepo.hook('preoutgoing', throw=True, source='clone')
355 srcrepo.hook('preoutgoing', throw=True, source='clone')
349 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
356 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
350 if not os.path.exists(dest):
357 if not os.path.exists(dest):
351 os.mkdir(dest)
358 os.mkdir(dest)
352 else:
359 else:
353 # only clean up directories we create ourselves
360 # only clean up directories we create ourselves
354 cleandir = hgdir
361 cleandir = hgdir
355 try:
362 try:
356 destpath = hgdir
363 destpath = hgdir
357 util.makedir(destpath, notindexed=True)
364 util.makedir(destpath, notindexed=True)
358 except OSError, inst:
365 except OSError, inst:
359 if inst.errno == errno.EEXIST:
366 if inst.errno == errno.EEXIST:
360 cleandir = None
367 cleandir = None
361 raise util.Abort(_("destination '%s' already exists")
368 raise util.Abort(_("destination '%s' already exists")
362 % dest)
369 % dest)
363 raise
370 raise
364
371
365 destlock = copystore(ui, srcrepo, destpath)
372 destlock = copystore(ui, srcrepo, destpath)
366 # copy bookmarks over
373 # copy bookmarks over
367 srcbookmarks = srcrepo.join('bookmarks')
374 srcbookmarks = srcrepo.join('bookmarks')
368 dstbookmarks = os.path.join(destpath, 'bookmarks')
375 dstbookmarks = os.path.join(destpath, 'bookmarks')
369 if os.path.exists(srcbookmarks):
376 if os.path.exists(srcbookmarks):
370 util.copyfile(srcbookmarks, dstbookmarks)
377 util.copyfile(srcbookmarks, dstbookmarks)
371
378
372 # Recomputing branch cache might be slow on big repos,
379 # Recomputing branch cache might be slow on big repos,
373 # so just copy it
380 # so just copy it
374 def copybranchcache(fname):
381 def copybranchcache(fname):
375 srcbranchcache = srcrepo.join('cache/%s' % fname)
382 srcbranchcache = srcrepo.join('cache/%s' % fname)
376 dstbranchcache = os.path.join(dstcachedir, fname)
383 dstbranchcache = os.path.join(dstcachedir, fname)
377 if os.path.exists(srcbranchcache):
384 if os.path.exists(srcbranchcache):
378 if not os.path.exists(dstcachedir):
385 if not os.path.exists(dstcachedir):
379 os.mkdir(dstcachedir)
386 os.mkdir(dstcachedir)
380 util.copyfile(srcbranchcache, dstbranchcache)
387 util.copyfile(srcbranchcache, dstbranchcache)
381
388
382 dstcachedir = os.path.join(destpath, 'cache')
389 dstcachedir = os.path.join(destpath, 'cache')
383 # In local clones we're copying all nodes, not just served
390 # In local clones we're copying all nodes, not just served
384 # ones. Therefore copy all branchcaches over.
391 # ones. Therefore copy all branchcaches over.
385 copybranchcache('branch2')
392 copybranchcache('branch2')
386 for cachename in repoview.filtertable:
393 for cachename in repoview.filtertable:
387 copybranchcache('branch2-%s' % cachename)
394 copybranchcache('branch2-%s' % cachename)
388
395
389 # we need to re-init the repo after manually copying the data
396 # we need to re-init the repo after manually copying the data
390 # into it
397 # into it
391 destpeer = peer(srcrepo, peeropts, dest)
398 destpeer = peer(srcrepo, peeropts, dest)
392 srcrepo.hook('outgoing', source='clone',
399 srcrepo.hook('outgoing', source='clone',
393 node=node.hex(node.nullid))
400 node=node.hex(node.nullid))
394 else:
401 else:
395 try:
402 try:
396 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
403 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
397 # only pass ui when no srcrepo
404 # only pass ui when no srcrepo
398 except OSError, inst:
405 except OSError, inst:
399 if inst.errno == errno.EEXIST:
406 if inst.errno == errno.EEXIST:
400 cleandir = None
407 cleandir = None
401 raise util.Abort(_("destination '%s' already exists")
408 raise util.Abort(_("destination '%s' already exists")
402 % dest)
409 % dest)
403 raise
410 raise
404
411
405 revs = None
412 revs = None
406 if rev:
413 if rev:
407 if not srcpeer.capable('lookup'):
414 if not srcpeer.capable('lookup'):
408 raise util.Abort(_("src repository does not support "
415 raise util.Abort(_("src repository does not support "
409 "revision lookup and so doesn't "
416 "revision lookup and so doesn't "
410 "support clone by revision"))
417 "support clone by revision"))
411 revs = [srcpeer.lookup(r) for r in rev]
418 revs = [srcpeer.lookup(r) for r in rev]
412 checkout = revs[0]
419 checkout = revs[0]
413 if destpeer.local():
420 if destpeer.local():
414 destpeer.local().clone(srcpeer, heads=revs, stream=stream)
421 destpeer.local().clone(srcpeer, heads=revs, stream=stream)
415 elif srcrepo:
422 elif srcrepo:
416 exchange.push(srcrepo, destpeer, revs=revs,
423 exchange.push(srcrepo, destpeer, revs=revs,
417 bookmarks=srcrepo._bookmarks.keys())
424 bookmarks=srcrepo._bookmarks.keys())
418 else:
425 else:
419 raise util.Abort(_("clone from remote to remote not supported"))
426 raise util.Abort(_("clone from remote to remote not supported"))
420
427
421 cleandir = None
428 cleandir = None
422
429
423 destrepo = destpeer.local()
430 destrepo = destpeer.local()
424 if destrepo:
431 if destrepo:
425 template = (
432 template = (
426 '# You may want to set your username here if it is not set\n'
433 '# You may want to set your username here if it is not set\n'
427 "# globally, or this repository requires a different\n"
434 "# globally, or this repository requires a different\n"
428 '# username from your usual configuration. If you want to\n'
435 '# username from your usual configuration. If you want to\n'
429 '# set something for all of your repositories on this\n'
436 '# set something for all of your repositories on this\n'
430 '# computer, try running the command\n'
437 '# computer, try running the command\n'
431 "# 'hg config --edit --global'\n"
438 "# 'hg config --edit --global'\n"
432 '# [ui]\n'
439 '# [ui]\n'
433 '# username = Jane Doe <jdoe@example.com>\n'
440 '# username = Jane Doe <jdoe@example.com>\n'
434 '[paths]\n'
441 '[paths]\n'
435 'default = %s\n'
442 'default = %s\n'
436 )
443 )
437 fp = destrepo.opener("hgrc", "w", text=True)
444 fp = destrepo.opener("hgrc", "w", text=True)
438 u = util.url(abspath)
445 u = util.url(abspath)
439 u.passwd = None
446 u.passwd = None
440 defaulturl = str(u)
447 defaulturl = str(u)
441 fp.write(template % defaulturl)
448 fp.write(template % defaulturl)
442 fp.close()
449 fp.close()
443
450
444 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
451 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
445
452
446 if update:
453 if update:
447 if update is not True:
454 if update is not True:
448 checkout = srcpeer.lookup(update)
455 checkout = srcpeer.lookup(update)
449 uprev = None
456 uprev = None
450 status = None
457 status = None
451 if checkout is not None:
458 if checkout is not None:
452 try:
459 try:
453 uprev = destrepo.lookup(checkout)
460 uprev = destrepo.lookup(checkout)
454 except error.RepoLookupError:
461 except error.RepoLookupError:
455 pass
462 pass
456 if uprev is None:
463 if uprev is None:
457 try:
464 try:
458 uprev = destrepo._bookmarks['@']
465 uprev = destrepo._bookmarks['@']
459 update = '@'
466 update = '@'
460 bn = destrepo[uprev].branch()
467 bn = destrepo[uprev].branch()
461 if bn == 'default':
468 if bn == 'default':
462 status = _("updating to bookmark @\n")
469 status = _("updating to bookmark @\n")
463 else:
470 else:
464 status = (_("updating to bookmark @ on branch %s\n")
471 status = (_("updating to bookmark @ on branch %s\n")
465 % bn)
472 % bn)
466 except KeyError:
473 except KeyError:
467 try:
474 try:
468 uprev = destrepo.branchtip('default')
475 uprev = destrepo.branchtip('default')
469 except error.RepoLookupError:
476 except error.RepoLookupError:
470 uprev = destrepo.lookup('tip')
477 uprev = destrepo.lookup('tip')
471 if not status:
478 if not status:
472 bn = destrepo[uprev].branch()
479 bn = destrepo[uprev].branch()
473 status = _("updating to branch %s\n") % bn
480 status = _("updating to branch %s\n") % bn
474 destrepo.ui.status(status)
481 destrepo.ui.status(status)
475 _update(destrepo, uprev)
482 _update(destrepo, uprev)
476 if update in destrepo._bookmarks:
483 if update in destrepo._bookmarks:
477 bookmarks.setcurrent(destrepo, update)
484 bookmarks.setcurrent(destrepo, update)
478 finally:
485 finally:
479 release(srclock, destlock)
486 release(srclock, destlock)
480 if cleandir is not None:
487 if cleandir is not None:
481 shutil.rmtree(cleandir, True)
488 shutil.rmtree(cleandir, True)
482 if srcpeer is not None:
489 if srcpeer is not None:
483 srcpeer.close()
490 srcpeer.close()
484 return srcpeer, destpeer
491 return srcpeer, destpeer
485
492
def _showstats(repo, stats):
    # stats is (updated, merged, removed, unresolved), as returned by
    # mergemod.update via updaterepo
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % stats)
489
496
def updaterepo(repo, node, overwrite):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, False, overwrite, None,
                           labels=['working copy', 'destination'])
498
505
def update(repo, node):
    """update the working directory to node, merging linear changes"""
    stats = updaterepo(repo, node, False)
    _showstats(repo, stats)
    unresolved = stats[3]
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0

# naming conflict in clone()
_update = update
509
516
def clean(repo, node, show_stats=True):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    # remove any leftover graft state file; missing is fine
    util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
    if show_stats:
        _showstats(repo, stats)
    return stats[3] > 0
517
524
def merge(repo, node, force=None, remind=True):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    stats = mergemod.update(repo, node, True, force, False)
    _showstats(repo, stats)
    unresolved = stats[3]
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg update -C .' to abandon\n"))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return unresolved > 0
529
536
530 def _incoming(displaychlist, subreporecurse, ui, repo, source,
537 def _incoming(displaychlist, subreporecurse, ui, repo, source,
531 opts, buffered=False):
538 opts, buffered=False):
532 """
539 """
533 Helper for incoming / gincoming.
540 Helper for incoming / gincoming.
534 displaychlist gets called with
541 displaychlist gets called with
535 (remoterepo, incomingchangesetlist, displayer) parameters,
542 (remoterepo, incomingchangesetlist, displayer) parameters,
536 and is supposed to contain only code that can't be unified.
543 and is supposed to contain only code that can't be unified.
537 """
544 """
538 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
545 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
539 other = peer(repo, opts, source)
546 other = peer(repo, opts, source)
540 ui.status(_('comparing with %s\n') % util.hidepassword(source))
547 ui.status(_('comparing with %s\n') % util.hidepassword(source))
541 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
548 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
542
549
543 if revs:
550 if revs:
544 revs = [other.lookup(rev) for rev in revs]
551 revs = [other.lookup(rev) for rev in revs]
545 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
552 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
546 revs, opts["bundle"], opts["force"])
553 revs, opts["bundle"], opts["force"])
547 try:
554 try:
548 if not chlist:
555 if not chlist:
549 ui.status(_("no changes found\n"))
556 ui.status(_("no changes found\n"))
550 return subreporecurse()
557 return subreporecurse()
551
558
552 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
559 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
553 displaychlist(other, chlist, displayer)
560 displaychlist(other, chlist, displayer)
554 displayer.close()
561 displayer.close()
555 finally:
562 finally:
556 cleanupfn()
563 cleanupfn()
557 subreporecurse()
564 subreporecurse()
558 return 0 # exit code is zero since we found incoming changes
565 return 0 # exit code is zero since we found incoming changes
559
566
560 def incoming(ui, repo, source, opts):
567 def incoming(ui, repo, source, opts):
561 def subreporecurse():
568 def subreporecurse():
562 ret = 1
569 ret = 1
563 if opts.get('subrepos'):
570 if opts.get('subrepos'):
564 ctx = repo[None]
571 ctx = repo[None]
565 for subpath in sorted(ctx.substate):
572 for subpath in sorted(ctx.substate):
566 sub = ctx.sub(subpath)
573 sub = ctx.sub(subpath)
567 ret = min(ret, sub.incoming(ui, source, opts))
574 ret = min(ret, sub.incoming(ui, source, opts))
568 return ret
575 return ret
569
576
570 def display(other, chlist, displayer):
577 def display(other, chlist, displayer):
571 limit = cmdutil.loglimit(opts)
578 limit = cmdutil.loglimit(opts)
572 if opts.get('newest_first'):
579 if opts.get('newest_first'):
573 chlist.reverse()
580 chlist.reverse()
574 count = 0
581 count = 0
575 for n in chlist:
582 for n in chlist:
576 if limit is not None and count >= limit:
583 if limit is not None and count >= limit:
577 break
584 break
578 parents = [p for p in other.changelog.parents(n) if p != nullid]
585 parents = [p for p in other.changelog.parents(n) if p != nullid]
579 if opts.get('no_merges') and len(parents) == 2:
586 if opts.get('no_merges') and len(parents) == 2:
580 continue
587 continue
581 count += 1
588 count += 1
582 displayer.show(other[n])
589 displayer.show(other[n])
583 return _incoming(display, subreporecurse, ui, repo, source, opts)
590 return _incoming(display, subreporecurse, ui, repo, source, opts)
584
591
585 def _outgoing(ui, repo, dest, opts):
592 def _outgoing(ui, repo, dest, opts):
586 dest = ui.expandpath(dest or 'default-push', dest or 'default')
593 dest = ui.expandpath(dest or 'default-push', dest or 'default')
587 dest, branches = parseurl(dest, opts.get('branch'))
594 dest, branches = parseurl(dest, opts.get('branch'))
588 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
595 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
589 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
596 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
590 if revs:
597 if revs:
591 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
598 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
592
599
593 other = peer(repo, opts, dest)
600 other = peer(repo, opts, dest)
594 outgoing = discovery.findcommonoutgoing(repo.unfiltered(), other, revs,
601 outgoing = discovery.findcommonoutgoing(repo.unfiltered(), other, revs,
595 force=opts.get('force'))
602 force=opts.get('force'))
596 o = outgoing.missing
603 o = outgoing.missing
597 if not o:
604 if not o:
598 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
605 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
599 return o, other
606 return o, other
600
607
601 def outgoing(ui, repo, dest, opts):
608 def outgoing(ui, repo, dest, opts):
602 def recurse():
609 def recurse():
603 ret = 1
610 ret = 1
604 if opts.get('subrepos'):
611 if opts.get('subrepos'):
605 ctx = repo[None]
612 ctx = repo[None]
606 for subpath in sorted(ctx.substate):
613 for subpath in sorted(ctx.substate):
607 sub = ctx.sub(subpath)
614 sub = ctx.sub(subpath)
608 ret = min(ret, sub.outgoing(ui, dest, opts))
615 ret = min(ret, sub.outgoing(ui, dest, opts))
609 return ret
616 return ret
610
617
611 limit = cmdutil.loglimit(opts)
618 limit = cmdutil.loglimit(opts)
612 o, other = _outgoing(ui, repo, dest, opts)
619 o, other = _outgoing(ui, repo, dest, opts)
613 if not o:
620 if not o:
614 cmdutil.outgoinghooks(ui, repo, other, opts, o)
621 cmdutil.outgoinghooks(ui, repo, other, opts, o)
615 return recurse()
622 return recurse()
616
623
617 if opts.get('newest_first'):
624 if opts.get('newest_first'):
618 o.reverse()
625 o.reverse()
619 displayer = cmdutil.show_changeset(ui, repo, opts)
626 displayer = cmdutil.show_changeset(ui, repo, opts)
620 count = 0
627 count = 0
621 for n in o:
628 for n in o:
622 if limit is not None and count >= limit:
629 if limit is not None and count >= limit:
623 break
630 break
624 parents = [p for p in repo.changelog.parents(n) if p != nullid]
631 parents = [p for p in repo.changelog.parents(n) if p != nullid]
625 if opts.get('no_merges') and len(parents) == 2:
632 if opts.get('no_merges') and len(parents) == 2:
626 continue
633 continue
627 count += 1
634 count += 1
628 displayer.show(repo[n])
635 displayer.show(repo[n])
629 displayer.close()
636 displayer.close()
630 cmdutil.outgoinghooks(ui, repo, other, opts, o)
637 cmdutil.outgoinghooks(ui, repo, other, opts, o)
631 recurse()
638 recurse()
632 return 0 # exit code is zero since we found outgoing changes
639 return 0 # exit code is zero since we found outgoing changes
633
640
634 def revert(repo, node, choose):
641 def revert(repo, node, choose):
635 """revert changes to revision in node without updating dirstate"""
642 """revert changes to revision in node without updating dirstate"""
636 return mergemod.update(repo, node, False, True, choose)[3] > 0
643 return mergemod.update(repo, node, False, True, choose)[3] > 0
637
644
638 def verify(repo):
645 def verify(repo):
639 """verify the consistency of a repository"""
646 """verify the consistency of a repository"""
640 return verifymod.verify(repo)
647 return verifymod.verify(repo)
641
648
642 def remoteui(src, opts):
649 def remoteui(src, opts):
643 'build a remote ui from ui or repo and opts'
650 'build a remote ui from ui or repo and opts'
644 if util.safehasattr(src, 'baseui'): # looks like a repository
651 if util.safehasattr(src, 'baseui'): # looks like a repository
645 dst = src.baseui.copy() # drop repo-specific config
652 dst = src.baseui.copy() # drop repo-specific config
646 src = src.ui # copy target options from repo
653 src = src.ui # copy target options from repo
647 else: # assume it's a global ui object
654 else: # assume it's a global ui object
648 dst = src.copy() # keep all global options
655 dst = src.copy() # keep all global options
649
656
650 # copy ssh-specific options
657 # copy ssh-specific options
651 for o in 'ssh', 'remotecmd':
658 for o in 'ssh', 'remotecmd':
652 v = opts.get(o) or src.config('ui', o)
659 v = opts.get(o) or src.config('ui', o)
653 if v:
660 if v:
654 dst.setconfig("ui", o, v, 'copied')
661 dst.setconfig("ui", o, v, 'copied')
655
662
656 # copy bundle-specific options
663 # copy bundle-specific options
657 r = src.config('bundle', 'mainreporoot')
664 r = src.config('bundle', 'mainreporoot')
658 if r:
665 if r:
659 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
666 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
660
667
661 # copy selected local settings to the remote ui
668 # copy selected local settings to the remote ui
662 for sect in ('auth', 'hostfingerprints', 'http_proxy'):
669 for sect in ('auth', 'hostfingerprints', 'http_proxy'):
663 for key, val in src.configitems(sect):
670 for key, val in src.configitems(sect):
664 dst.setconfig(sect, key, val, 'copied')
671 dst.setconfig(sect, key, val, 'copied')
665 v = src.config('web', 'cacerts')
672 v = src.config('web', 'cacerts')
666 if v:
673 if v:
667 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
674 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
668
675
669 return dst
676 return dst
@@ -1,189 +1,189
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from mercurial import changegroup, exchange
9 from mercurial import changegroup, exchange
10 from mercurial.node import short
10 from mercurial.node import short
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12 import errno
12 import errno
13
13
14 def _bundle(repo, bases, heads, node, suffix, compress=True):
14 def _bundle(repo, bases, heads, node, suffix, compress=True):
15 """create a bundle with the specified revisions as a backup"""
15 """create a bundle with the specified revisions as a backup"""
16 cg = changegroup.changegroupsubset(repo, bases, heads, 'strip')
16 cg = changegroup.changegroupsubset(repo, bases, heads, 'strip')
17 backupdir = "strip-backup"
17 backupdir = "strip-backup"
18 vfs = repo.vfs
18 vfs = repo.vfs
19 if not vfs.isdir(backupdir):
19 if not vfs.isdir(backupdir):
20 vfs.mkdir(backupdir)
20 vfs.mkdir(backupdir)
21 name = "%s/%s-%s.hg" % (backupdir, short(node), suffix)
21 name = "%s/%s-%s.hg" % (backupdir, short(node), suffix)
22 if compress:
22 if compress:
23 bundletype = "HG10BZ"
23 bundletype = "HG10BZ"
24 else:
24 else:
25 bundletype = "HG10UN"
25 bundletype = "HG10UN"
26 return changegroup.writebundle(cg, name, bundletype, vfs)
26 return changegroup.writebundle(cg, name, bundletype, vfs)
27
27
28 def _collectfiles(repo, striprev):
28 def _collectfiles(repo, striprev):
29 """find out the filelogs affected by the strip"""
29 """find out the filelogs affected by the strip"""
30 files = set()
30 files = set()
31
31
32 for x in xrange(striprev, len(repo)):
32 for x in xrange(striprev, len(repo)):
33 files.update(repo[x].files())
33 files.update(repo[x].files())
34
34
35 return sorted(files)
35 return sorted(files)
36
36
37 def _collectbrokencsets(repo, files, striprev):
37 def _collectbrokencsets(repo, files, striprev):
38 """return the changesets which will be broken by the truncation"""
38 """return the changesets which will be broken by the truncation"""
39 s = set()
39 s = set()
40 def collectone(revlog):
40 def collectone(revlog):
41 _, brokenset = revlog.getstrippoint(striprev)
41 _, brokenset = revlog.getstrippoint(striprev)
42 s.update([revlog.linkrev(r) for r in brokenset])
42 s.update([revlog.linkrev(r) for r in brokenset])
43
43
44 collectone(repo.manifest)
44 collectone(repo.manifest)
45 for fname in files:
45 for fname in files:
46 collectone(repo.file(fname))
46 collectone(repo.file(fname))
47
47
48 return s
48 return s
49
49
50 def strip(ui, repo, nodelist, backup=True, topic='backup'):
50 def strip(ui, repo, nodelist, backup=True, topic='backup'):
51
51
52 # Simple way to maintain backwards compatibility for this
52 # Simple way to maintain backwards compatibility for this
53 # argument.
53 # argument.
54 if backup in ['none', 'strip']:
54 if backup in ['none', 'strip']:
55 backup = False
55 backup = False
56
56
57 repo = repo.unfiltered()
57 repo = repo.unfiltered()
58 repo.destroying()
58 repo.destroying()
59
59
60 cl = repo.changelog
60 cl = repo.changelog
61 # TODO handle undo of merge sets
61 # TODO handle undo of merge sets
62 if isinstance(nodelist, str):
62 if isinstance(nodelist, str):
63 nodelist = [nodelist]
63 nodelist = [nodelist]
64 striplist = [cl.rev(node) for node in nodelist]
64 striplist = [cl.rev(node) for node in nodelist]
65 striprev = min(striplist)
65 striprev = min(striplist)
66
66
67 # Some revisions with rev > striprev may not be descendants of striprev.
67 # Some revisions with rev > striprev may not be descendants of striprev.
68 # We have to find these revisions and put them in a bundle, so that
68 # We have to find these revisions and put them in a bundle, so that
69 # we can restore them after the truncations.
69 # we can restore them after the truncations.
70 # To create the bundle we use repo.changegroupsubset which requires
70 # To create the bundle we use repo.changegroupsubset which requires
71 # the list of heads and bases of the set of interesting revisions.
71 # the list of heads and bases of the set of interesting revisions.
72 # (head = revision in the set that has no descendant in the set;
72 # (head = revision in the set that has no descendant in the set;
73 # base = revision in the set that has no ancestor in the set)
73 # base = revision in the set that has no ancestor in the set)
74 tostrip = set(striplist)
74 tostrip = set(striplist)
75 for rev in striplist:
75 for rev in striplist:
76 for desc in cl.descendants([rev]):
76 for desc in cl.descendants([rev]):
77 tostrip.add(desc)
77 tostrip.add(desc)
78
78
79 files = _collectfiles(repo, striprev)
79 files = _collectfiles(repo, striprev)
80 saverevs = _collectbrokencsets(repo, files, striprev)
80 saverevs = _collectbrokencsets(repo, files, striprev)
81
81
82 # compute heads
82 # compute heads
83 saveheads = set(saverevs)
83 saveheads = set(saverevs)
84 for r in xrange(striprev + 1, len(cl)):
84 for r in xrange(striprev + 1, len(cl)):
85 if r not in tostrip:
85 if r not in tostrip:
86 saverevs.add(r)
86 saverevs.add(r)
87 saveheads.difference_update(cl.parentrevs(r))
87 saveheads.difference_update(cl.parentrevs(r))
88 saveheads.add(r)
88 saveheads.add(r)
89 saveheads = [cl.node(r) for r in saveheads]
89 saveheads = [cl.node(r) for r in saveheads]
90
90
91 # compute base nodes
91 # compute base nodes
92 if saverevs:
92 if saverevs:
93 descendants = set(cl.descendants(saverevs))
93 descendants = set(cl.descendants(saverevs))
94 saverevs.difference_update(descendants)
94 saverevs.difference_update(descendants)
95 savebases = [cl.node(r) for r in saverevs]
95 savebases = [cl.node(r) for r in saverevs]
96 stripbases = [cl.node(r) for r in tostrip]
96 stripbases = [cl.node(r) for r in tostrip]
97
97
98 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
98 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
99 # is much faster
99 # is much faster
100 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
100 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
101 if newbmtarget:
101 if newbmtarget:
102 newbmtarget = repo[newbmtarget[0]].node()
102 newbmtarget = repo[newbmtarget.first()].node()
103 else:
103 else:
104 newbmtarget = '.'
104 newbmtarget = '.'
105
105
106 bm = repo._bookmarks
106 bm = repo._bookmarks
107 updatebm = []
107 updatebm = []
108 for m in bm:
108 for m in bm:
109 rev = repo[bm[m]].rev()
109 rev = repo[bm[m]].rev()
110 if rev in tostrip:
110 if rev in tostrip:
111 updatebm.append(m)
111 updatebm.append(m)
112
112
113 # create a changegroup for all the branches we need to keep
113 # create a changegroup for all the branches we need to keep
114 backupfile = None
114 backupfile = None
115 vfs = repo.vfs
115 vfs = repo.vfs
116 if backup:
116 if backup:
117 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
117 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
118 repo.ui.status(_("saved backup bundle to %s\n") %
118 repo.ui.status(_("saved backup bundle to %s\n") %
119 vfs.join(backupfile))
119 vfs.join(backupfile))
120 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
120 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
121 vfs.join(backupfile))
121 vfs.join(backupfile))
122 if saveheads or savebases:
122 if saveheads or savebases:
123 # do not compress partial bundle if we remove it from disk later
123 # do not compress partial bundle if we remove it from disk later
124 chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
124 chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
125 compress=False)
125 compress=False)
126
126
127 mfst = repo.manifest
127 mfst = repo.manifest
128
128
129 tr = repo.transaction("strip")
129 tr = repo.transaction("strip")
130 offset = len(tr.entries)
130 offset = len(tr.entries)
131
131
132 try:
132 try:
133 tr.startgroup()
133 tr.startgroup()
134 cl.strip(striprev, tr)
134 cl.strip(striprev, tr)
135 mfst.strip(striprev, tr)
135 mfst.strip(striprev, tr)
136 for fn in files:
136 for fn in files:
137 repo.file(fn).strip(striprev, tr)
137 repo.file(fn).strip(striprev, tr)
138 tr.endgroup()
138 tr.endgroup()
139
139
140 try:
140 try:
141 for i in xrange(offset, len(tr.entries)):
141 for i in xrange(offset, len(tr.entries)):
142 file, troffset, ignore = tr.entries[i]
142 file, troffset, ignore = tr.entries[i]
143 repo.sopener(file, 'a').truncate(troffset)
143 repo.sopener(file, 'a').truncate(troffset)
144 if troffset == 0:
144 if troffset == 0:
145 repo.store.markremoved(file)
145 repo.store.markremoved(file)
146 tr.close()
146 tr.close()
147 except: # re-raises
147 except: # re-raises
148 tr.abort()
148 tr.abort()
149 raise
149 raise
150
150
151 if saveheads or savebases:
151 if saveheads or savebases:
152 ui.note(_("adding branch\n"))
152 ui.note(_("adding branch\n"))
153 f = vfs.open(chgrpfile, "rb")
153 f = vfs.open(chgrpfile, "rb")
154 gen = exchange.readbundle(ui, f, chgrpfile, vfs)
154 gen = exchange.readbundle(ui, f, chgrpfile, vfs)
155 if not repo.ui.verbose:
155 if not repo.ui.verbose:
156 # silence internal shuffling chatter
156 # silence internal shuffling chatter
157 repo.ui.pushbuffer()
157 repo.ui.pushbuffer()
158 changegroup.addchangegroup(repo, gen, 'strip',
158 changegroup.addchangegroup(repo, gen, 'strip',
159 'bundle:' + vfs.join(chgrpfile), True)
159 'bundle:' + vfs.join(chgrpfile), True)
160 if not repo.ui.verbose:
160 if not repo.ui.verbose:
161 repo.ui.popbuffer()
161 repo.ui.popbuffer()
162 f.close()
162 f.close()
163
163
164 # remove undo files
164 # remove undo files
165 for undovfs, undofile in repo.undofiles():
165 for undovfs, undofile in repo.undofiles():
166 try:
166 try:
167 undovfs.unlink(undofile)
167 undovfs.unlink(undofile)
168 except OSError, e:
168 except OSError, e:
169 if e.errno != errno.ENOENT:
169 if e.errno != errno.ENOENT:
170 ui.warn(_('error removing %s: %s\n') %
170 ui.warn(_('error removing %s: %s\n') %
171 (undovfs.join(undofile), str(e)))
171 (undovfs.join(undofile), str(e)))
172
172
173 for m in updatebm:
173 for m in updatebm:
174 bm[m] = repo[newbmtarget].node()
174 bm[m] = repo[newbmtarget].node()
175 bm.write()
175 bm.write()
176 except: # re-raises
176 except: # re-raises
177 if backupfile:
177 if backupfile:
178 ui.warn(_("strip failed, full bundle stored in '%s'\n")
178 ui.warn(_("strip failed, full bundle stored in '%s'\n")
179 % vfs.join(backupfile))
179 % vfs.join(backupfile))
180 elif saveheads:
180 elif saveheads:
181 ui.warn(_("strip failed, partial bundle stored in '%s'\n")
181 ui.warn(_("strip failed, partial bundle stored in '%s'\n")
182 % vfs.join(chgrpfile))
182 % vfs.join(chgrpfile))
183 raise
183 raise
184 else:
184 else:
185 if saveheads or savebases:
185 if saveheads or savebases:
186 # Remove partial backup only if there were no exceptions
186 # Remove partial backup only if there were no exceptions
187 vfs.unlink(chgrpfile)
187 vfs.unlink(chgrpfile)
188
188
189 repo.destroyed()
189 repo.destroyed()
General Comments 0
You need to be logged in to leave comments. Login now