clone: add progress calls to uncompressed code path
Augie Fackler
r16770:b3435385 default
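
The patched code itself sits further down the file, in the stream ("uncompressed") clone path, which previously copied store files without reporting progress. A minimal sketch of the idea, assuming the ui.progress API and a stream_in-style loop over (name, size) entries; the variable names are illustrative, not the exact patch:

    # assume: file_entries yields (name, size), fp is the incoming stream,
    # and total_bytes was advertised by the remote at stream start
    handled_bytes = 0
    for name, size in file_entries:
        ofp = self.sopener(name, 'w')   # illustrative; real code decodes name
        for chunk in util.filechunkiter(fp, limit=size):
            handled_bytes += len(chunk)
            self.ui.progress(_('clone'), handled_bytes, total=total_bytes)
            ofp.write(chunk)
        ofp.close()
    self.ui.progress(_('clone'), None)  # clear the topic when done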
@@ -1,2364 +1,2370
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

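# Illustrative sketch (not part of localrepo.py): filecache caches a
# computed property and invalidates it when the stat of the backing file
# changes; storecache only overrides join() so the watched path resolves
# under .hg/store instead of .hg. A hypothetical cached property:
#
#     class examplerepo(localrepository):
#         @storecache('phaseroots')
#         def _rawphaseroots(self):
#             # reread only when .hg/store/phaseroots changes on disk
#             return self.sopener.tryread('phaseroots')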
class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.opener.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

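    # Illustrative sketch (not part of localrepo.py): HG_PENDING is how
    # pretxn* hooks observe not-yet-committed revisions. While a
    # transaction is open, pending changelog entries live in
    # 00changelog.i.a; a hook process started with HG_PENDING pointing
    # at this root picks them up through the branch above, roughly:
    #
    #     repo = localrepository(ui, os.environ['HG_PENDING'])
    #     ctx = repo['tip']   # may resolve to a pending, uncommitted rev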
    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

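    # Illustrative sketch (not part of localrepo.py): revs() and set()
    # are the internal revset entry points. formatspec() quotes the
    # arguments, so callers never build revset strings by hand. A
    # hypothetical caller:
    #
    #     for ctx in repo.set('heads(branch(%s))', 'default'):
    #         print ctx.rev(), short(ctx.node())
    #
    #     # revs() runs the same query but returns revision numbers:
    #     revs = repo.revs('%d::%d', 10, 20)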
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

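    # Illustrative sketch (not part of localrepo.py): a hypothetical
    # caller adding a committed and a local tag. Global tags append to
    # .hgtags and create a changeset; local tags only touch
    # .hg/localtags.
    #
    #     node = repo.lookup('default')
    #     repo.tag(['v1.0'], node, 'Added tag v1.0', local=False,
    #              user='editor <editor@example.com>', date=None)
    #     repo.tag('wip', node, '', local=True, user=None, date=None)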
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

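    # Illustrative sketch (not part of localrepo.py): branchmap() maps
    # each branch name to its heads, ordered oldest to newest, which is
    # why _branchtip() below walks the list in reverse to prefer the
    # tipmost open head.
    #
    #     bm = repo.branchmap()
    #     # e.g. {'default': [node1, node2], 'stable': [node3]}
    #     for branch, heads in bm.iteritems():
    #         print branch, [short(h) for h in heads]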
    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

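    # Illustrative sketch (not part of localrepo.py): the on-disk layout
    # that _readbranchcache/_writebranchcache agree on. The first line
    # records the cached tip node and revision; each following line is
    # one branch head. A hypothetical .hg/cache/branchheads:
    #
    #     <40-hex tip node> 42
    #     <40-hex head node> default
    #     <40-hex head node> default
    #     <40-hex head node> stable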
    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhnode = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhnode)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

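    # Illustrative note (not part of localrepo.py): why a parent check
    # is not enough. With history 1 (branch a) -> 2 (branch b) ->
    # 3 (branch a), rev 1 is not a parent of rev 3, yet it is reachable
    # from 3 and so is no longer a head of branch a. The loop above
    # therefore does a full reachability walk from each new candidate
    # head down to the oldest recorded head before discarding
    # superseded entries.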
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

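    # Illustrative sketch (not part of localrepo.py): known() backs the
    # 'known' wire capability; secret changesets are deliberately
    # reported as unknown so discovery never asks a peer to exchange
    # them.
    #
    #     missing = 'x' * 20                 # some node we do not have
    #     repo.known([missing, repo['tip'].node()])
    #     # -> [False, True]  (unless tip is secret)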
    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

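    # Illustrative sketch (not part of localrepo.py): _loadfilter reads
    # the [encode] and [decode] hgrc sections, mapping file patterns to
    # a registered data filter or a shell command. A hypothetical
    # configuration:
    #
    #     [encode]
    #     # filter file contents on the way into the repository
    #     *.gz = pipe: gunzip
    #
    #     [decode]
    #     # and the inverse when writing to the working directory
    #     *.gz = pipe: gzip
    #
    # A '!' value disables a pattern inherited from a broader config
    # file, which is what the `cmd == '!'` check above implements.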
    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

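    # Illustrative sketch (not part of localrepo.py): the conventional
    # transaction dance used by callers. release() without a preceding
    # close() rolls the journal back.
    #
    #     tr = repo.transaction('my-operation')
    #     try:
    #         # ... append to revlogs via repo.sopener ...
    #         tr.close()      # commit: journal is renamed to undo
    #     finally:
    #         tr.release()    # abort if close() was never reached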
752 def _journalfiles(self):
752 def _journalfiles(self):
753 return (self.sjoin('journal'), self.join('journal.dirstate'),
753 return (self.sjoin('journal'), self.join('journal.dirstate'),
754 self.join('journal.branch'), self.join('journal.desc'),
754 self.join('journal.branch'), self.join('journal.desc'),
755 self.join('journal.bookmarks'),
755 self.join('journal.bookmarks'),
756 self.sjoin('journal.phaseroots'))
756 self.sjoin('journal.phaseroots'))
757
757
758 def undofiles(self):
758 def undofiles(self):
759 return [undoname(x) for x in self._journalfiles()]
759 return [undoname(x) for x in self._journalfiles()]
760
760
761 def _writejournal(self, desc):
761 def _writejournal(self, desc):
762 self.opener.write("journal.dirstate",
762 self.opener.write("journal.dirstate",
763 self.opener.tryread("dirstate"))
763 self.opener.tryread("dirstate"))
764 self.opener.write("journal.branch",
764 self.opener.write("journal.branch",
765 encoding.fromlocal(self.dirstate.branch()))
765 encoding.fromlocal(self.dirstate.branch()))
766 self.opener.write("journal.desc",
766 self.opener.write("journal.desc",
767 "%d\n%s\n" % (len(self), desc))
767 "%d\n%s\n" % (len(self), desc))
768 self.opener.write("journal.bookmarks",
768 self.opener.write("journal.bookmarks",
769 self.opener.tryread("bookmarks"))
769 self.opener.tryread("bookmarks"))
770 self.sopener.write("journal.phaseroots",
770 self.sopener.write("journal.phaseroots",
771 self.sopener.tryread("phaseroots"))
771 self.sopener.tryread("phaseroots"))
772
772
773 def recover(self):
773 def recover(self):
774 lock = self.lock()
774 lock = self.lock()
775 try:
775 try:
776 if os.path.exists(self.sjoin("journal")):
776 if os.path.exists(self.sjoin("journal")):
777 self.ui.status(_("rolling back interrupted transaction\n"))
777 self.ui.status(_("rolling back interrupted transaction\n"))
778 transaction.rollback(self.sopener, self.sjoin("journal"),
778 transaction.rollback(self.sopener, self.sjoin("journal"),
779 self.ui.warn)
779 self.ui.warn)
780 self.invalidate()
780 self.invalidate()
781 return True
781 return True
782 else:
782 else:
783 self.ui.warn(_("no interrupted transaction available\n"))
783 self.ui.warn(_("no interrupted transaction available\n"))
784 return False
784 return False
785 finally:
785 finally:
786 lock.release()
786 lock.release()
787
787
788 def rollback(self, dryrun=False, force=False):
788 def rollback(self, dryrun=False, force=False):
789 wlock = lock = None
789 wlock = lock = None
790 try:
790 try:
791 wlock = self.wlock()
791 wlock = self.wlock()
792 lock = self.lock()
792 lock = self.lock()
793 if os.path.exists(self.sjoin("undo")):
793 if os.path.exists(self.sjoin("undo")):
794 return self._rollback(dryrun, force)
794 return self._rollback(dryrun, force)
795 else:
795 else:
796 self.ui.warn(_("no rollback information available\n"))
796 self.ui.warn(_("no rollback information available\n"))
797 return 1
797 return 1
798 finally:
798 finally:
799 release(lock, wlock)
799 release(lock, wlock)
800
800
801 def _rollback(self, dryrun, force):
801 def _rollback(self, dryrun, force):
802 ui = self.ui
802 ui = self.ui
803 try:
803 try:
804 args = self.opener.read('undo.desc').splitlines()
804 args = self.opener.read('undo.desc').splitlines()
805 (oldlen, desc, detail) = (int(args[0]), args[1], None)
805 (oldlen, desc, detail) = (int(args[0]), args[1], None)
806 if len(args) >= 3:
806 if len(args) >= 3:
807 detail = args[2]
807 detail = args[2]
808 oldtip = oldlen - 1
808 oldtip = oldlen - 1
809
809
810 if detail and ui.verbose:
810 if detail and ui.verbose:
811 msg = (_('repository tip rolled back to revision %s'
811 msg = (_('repository tip rolled back to revision %s'
812 ' (undo %s: %s)\n')
812 ' (undo %s: %s)\n')
813 % (oldtip, desc, detail))
813 % (oldtip, desc, detail))
814 else:
814 else:
815 msg = (_('repository tip rolled back to revision %s'
815 msg = (_('repository tip rolled back to revision %s'
816 ' (undo %s)\n')
816 ' (undo %s)\n')
817 % (oldtip, desc))
817 % (oldtip, desc))
818 except IOError:
818 except IOError:
819 msg = _('rolling back unknown transaction\n')
819 msg = _('rolling back unknown transaction\n')
820 desc = None
820 desc = None
821
821
822 if not force and self['.'] != self['tip'] and desc == 'commit':
822 if not force and self['.'] != self['tip'] and desc == 'commit':
823 raise util.Abort(
823 raise util.Abort(
824 _('rollback of last commit while not checked out '
824 _('rollback of last commit while not checked out '
825 'may lose data'), hint=_('use -f to force'))
825 'may lose data'), hint=_('use -f to force'))
826
826
827 ui.status(msg)
827 ui.status(msg)
828 if dryrun:
828 if dryrun:
829 return 0
829 return 0
830
830
831 parents = self.dirstate.parents()
831 parents = self.dirstate.parents()
832 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
832 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
833 if os.path.exists(self.join('undo.bookmarks')):
833 if os.path.exists(self.join('undo.bookmarks')):
834 util.rename(self.join('undo.bookmarks'),
834 util.rename(self.join('undo.bookmarks'),
835 self.join('bookmarks'))
835 self.join('bookmarks'))
836 if os.path.exists(self.sjoin('undo.phaseroots')):
836 if os.path.exists(self.sjoin('undo.phaseroots')):
837 util.rename(self.sjoin('undo.phaseroots'),
837 util.rename(self.sjoin('undo.phaseroots'),
838 self.sjoin('phaseroots'))
838 self.sjoin('phaseroots'))
839 self.invalidate()
839 self.invalidate()
840
840
841 parentgone = (parents[0] not in self.changelog.nodemap or
841 parentgone = (parents[0] not in self.changelog.nodemap or
842 parents[1] not in self.changelog.nodemap)
842 parents[1] not in self.changelog.nodemap)
843 if parentgone:
843 if parentgone:
844 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
844 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
845 try:
845 try:
846 branch = self.opener.read('undo.branch')
846 branch = self.opener.read('undo.branch')
847 self.dirstate.setbranch(branch)
847 self.dirstate.setbranch(branch)
848 except IOError:
848 except IOError:
849 ui.warn(_('named branch could not be reset: '
849 ui.warn(_('named branch could not be reset: '
850 'current branch is still \'%s\'\n')
850 'current branch is still \'%s\'\n')
851 % self.dirstate.branch())
851 % self.dirstate.branch())
852
852
853 self.dirstate.invalidate()
853 self.dirstate.invalidate()
854 parents = tuple([p.rev() for p in self.parents()])
854 parents = tuple([p.rev() for p in self.parents()])
855 if len(parents) > 1:
855 if len(parents) > 1:
856 ui.status(_('working directory now based on '
856 ui.status(_('working directory now based on '
857 'revisions %d and %d\n') % parents)
857 'revisions %d and %d\n') % parents)
858 else:
858 else:
859 ui.status(_('working directory now based on '
859 ui.status(_('working directory now based on '
860 'revision %d\n') % parents)
860 'revision %d\n') % parents)
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

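    # Illustrative sketch for _afterlock() above (not part of the original
    # source): a caller can defer work until the store lock is released,
    # e.g. to report something only once the current operation is done:
    #
    #   def notify():
    #       repo.ui.status('lock released\n')
    #   repo._afterlock(notify)
    #
    # If no lock is currently held, the callback runs immediately.
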
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). (If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

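    # Illustrative usage sketch for lock() above (an assumption about a
    # typical caller, not part of the original source): store writers pair
    # the lock with a transaction, mirroring commitctx() below:
    #
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('my-operation')
    #       try:
    #           # ... write revlog data ...
    #           tr.close()
    #       finally:
    #           tr.release()
    #   finally:
    #       lock.release()
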
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

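    # Illustrative usage sketch for wlock() above (not part of the original
    # source): code touching the dirstate takes the working-directory lock
    # first, as status() does below when fixing up clean files:
    #
    #   wlock = repo.wlock()
    #   try:
    #       repo.dirstate.normal('some/file')
    #   finally:
    #       wlock.release()
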
974 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
974 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
975 """
975 """
976 commit an individual file as part of a larger transaction
976 commit an individual file as part of a larger transaction
977 """
977 """
978
978
979 fname = fctx.path()
979 fname = fctx.path()
980 text = fctx.data()
980 text = fctx.data()
981 flog = self.file(fname)
981 flog = self.file(fname)
982 fparent1 = manifest1.get(fname, nullid)
982 fparent1 = manifest1.get(fname, nullid)
983 fparent2 = fparent2o = manifest2.get(fname, nullid)
983 fparent2 = fparent2o = manifest2.get(fname, nullid)
984
984
985 meta = {}
985 meta = {}
986 copy = fctx.renamed()
986 copy = fctx.renamed()
987 if copy and copy[0] != fname:
987 if copy and copy[0] != fname:
988 # Mark the new revision of this file as a copy of another
988 # Mark the new revision of this file as a copy of another
989 # file. This copy data will effectively act as a parent
989 # file. This copy data will effectively act as a parent
990 # of this new revision. If this is a merge, the first
990 # of this new revision. If this is a merge, the first
991 # parent will be the nullid (meaning "look up the copy data")
991 # parent will be the nullid (meaning "look up the copy data")
992 # and the second one will be the other parent. For example:
992 # and the second one will be the other parent. For example:
993 #
993 #
994 # 0 --- 1 --- 3 rev1 changes file foo
994 # 0 --- 1 --- 3 rev1 changes file foo
995 # \ / rev2 renames foo to bar and changes it
995 # \ / rev2 renames foo to bar and changes it
996 # \- 2 -/ rev3 should have bar with all changes and
996 # \- 2 -/ rev3 should have bar with all changes and
997 # should record that bar descends from
997 # should record that bar descends from
998 # bar in rev2 and foo in rev1
998 # bar in rev2 and foo in rev1
999 #
999 #
1000 # this allows this merge to succeed:
1000 # this allows this merge to succeed:
1001 #
1001 #
1002 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1002 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1003 # \ / merging rev3 and rev4 should use bar@rev2
1003 # \ / merging rev3 and rev4 should use bar@rev2
1004 # \- 2 --- 4 as the merge base
1004 # \- 2 --- 4 as the merge base
1005 #
1005 #
1006
1006
1007 cfname = copy[0]
1007 cfname = copy[0]
1008 crev = manifest1.get(cfname)
1008 crev = manifest1.get(cfname)
1009 newfparent = fparent2
1009 newfparent = fparent2
1010
1010
1011 if manifest2: # branch merge
1011 if manifest2: # branch merge
1012 if fparent2 == nullid or crev is None: # copied on remote side
1012 if fparent2 == nullid or crev is None: # copied on remote side
1013 if cfname in manifest2:
1013 if cfname in manifest2:
1014 crev = manifest2[cfname]
1014 crev = manifest2[cfname]
1015 newfparent = fparent1
1015 newfparent = fparent1
1016
1016
1017 # find source in nearest ancestor if we've lost track
1017 # find source in nearest ancestor if we've lost track
1018 if not crev:
1018 if not crev:
1019 self.ui.debug(" %s: searching for copy revision for %s\n" %
1019 self.ui.debug(" %s: searching for copy revision for %s\n" %
1020 (fname, cfname))
1020 (fname, cfname))
1021 for ancestor in self[None].ancestors():
1021 for ancestor in self[None].ancestors():
1022 if cfname in ancestor:
1022 if cfname in ancestor:
1023 crev = ancestor[cfname].filenode()
1023 crev = ancestor[cfname].filenode()
1024 break
1024 break
1025
1025
1026 if crev:
1026 if crev:
1027 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1027 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1028 meta["copy"] = cfname
1028 meta["copy"] = cfname
1029 meta["copyrev"] = hex(crev)
1029 meta["copyrev"] = hex(crev)
1030 fparent1, fparent2 = nullid, newfparent
1030 fparent1, fparent2 = nullid, newfparent
1031 else:
1031 else:
1032 self.ui.warn(_("warning: can't find ancestor for '%s' "
1032 self.ui.warn(_("warning: can't find ancestor for '%s' "
1033 "copied from '%s'!\n") % (fname, cfname))
1033 "copied from '%s'!\n") % (fname, cfname))
1034
1034
1035 elif fparent2 != nullid:
1035 elif fparent2 != nullid:
1036 # is one parent an ancestor of the other?
1036 # is one parent an ancestor of the other?
1037 fparentancestor = flog.ancestor(fparent1, fparent2)
1037 fparentancestor = flog.ancestor(fparent1, fparent2)
1038 if fparentancestor == fparent1:
1038 if fparentancestor == fparent1:
1039 fparent1, fparent2 = fparent2, nullid
1039 fparent1, fparent2 = fparent2, nullid
1040 elif fparentancestor == fparent2:
1040 elif fparentancestor == fparent2:
1041 fparent2 = nullid
1041 fparent2 = nullid
1042
1042
1043 # is the file changed?
1043 # is the file changed?
1044 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1044 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1045 changelist.append(fname)
1045 changelist.append(fname)
1046 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1046 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1047
1047
1048 # are just the flags changed during merge?
1048 # are just the flags changed during merge?
1049 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1049 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1050 changelist.append(fname)
1050 changelist.append(fname)
1051
1051
1052 return fparent1
1052 return fparent1
1053
1053
1054 def commit(self, text="", user=None, date=None, match=None, force=False,
1054 def commit(self, text="", user=None, date=None, match=None, force=False,
1055 editor=False, extra={}):
1055 editor=False, extra={}):
1056 """Add a new revision to current repository.
1056 """Add a new revision to current repository.
1057
1057
1058 Revision information is gathered from the working directory,
1058 Revision information is gathered from the working directory,
1059 match can be used to filter the committed files. If editor is
1059 match can be used to filter the committed files. If editor is
1060 supplied, it is called to get a commit message.
1060 supplied, it is called to get a commit message.
1061 """
1061 """
1062
1062
1063 def fail(f, msg):
1063 def fail(f, msg):
1064 raise util.Abort('%s: %s' % (f, msg))
1064 raise util.Abort('%s: %s' % (f, msg))
1065
1065
1066 if not match:
1066 if not match:
1067 match = matchmod.always(self.root, '')
1067 match = matchmod.always(self.root, '')
1068
1068
1069 if not force:
1069 if not force:
1070 vdirs = []
1070 vdirs = []
1071 match.dir = vdirs.append
1071 match.dir = vdirs.append
1072 match.bad = fail
1072 match.bad = fail
1073
1073
1074 wlock = self.wlock()
1074 wlock = self.wlock()
1075 try:
1075 try:
1076 wctx = self[None]
1076 wctx = self[None]
1077 merge = len(wctx.parents()) > 1
1077 merge = len(wctx.parents()) > 1
1078
1078
1079 if (not force and merge and match and
1079 if (not force and merge and match and
1080 (match.files() or match.anypats())):
1080 (match.files() or match.anypats())):
1081 raise util.Abort(_('cannot partially commit a merge '
1081 raise util.Abort(_('cannot partially commit a merge '
1082 '(do not specify files or patterns)'))
1082 '(do not specify files or patterns)'))
1083
1083
1084 changes = self.status(match=match, clean=force)
1084 changes = self.status(match=match, clean=force)
1085 if force:
1085 if force:
1086 changes[0].extend(changes[6]) # mq may commit unchanged files
1086 changes[0].extend(changes[6]) # mq may commit unchanged files
1087
1087
1088 # check subrepos
1088 # check subrepos
1089 subs = []
1089 subs = []
1090 commitsubs = set()
1090 commitsubs = set()
1091 newstate = wctx.substate.copy()
1091 newstate = wctx.substate.copy()
1092 # only manage subrepos and .hgsubstate if .hgsub is present
1092 # only manage subrepos and .hgsubstate if .hgsub is present
1093 if '.hgsub' in wctx:
1093 if '.hgsub' in wctx:
1094 # we'll decide whether to track this ourselves, thanks
1094 # we'll decide whether to track this ourselves, thanks
1095 if '.hgsubstate' in changes[0]:
1095 if '.hgsubstate' in changes[0]:
1096 changes[0].remove('.hgsubstate')
1096 changes[0].remove('.hgsubstate')
1097 if '.hgsubstate' in changes[2]:
1097 if '.hgsubstate' in changes[2]:
1098 changes[2].remove('.hgsubstate')
1098 changes[2].remove('.hgsubstate')
1099
1099
1100 # compare current state to last committed state
1100 # compare current state to last committed state
1101 # build new substate based on last committed state
1101 # build new substate based on last committed state
1102 oldstate = wctx.p1().substate
1102 oldstate = wctx.p1().substate
1103 for s in sorted(newstate.keys()):
1103 for s in sorted(newstate.keys()):
1104 if not match(s):
1104 if not match(s):
1105 # ignore working copy, use old state if present
1105 # ignore working copy, use old state if present
1106 if s in oldstate:
1106 if s in oldstate:
1107 newstate[s] = oldstate[s]
1107 newstate[s] = oldstate[s]
1108 continue
1108 continue
1109 if not force:
1109 if not force:
1110 raise util.Abort(
1110 raise util.Abort(
1111 _("commit with new subrepo %s excluded") % s)
1111 _("commit with new subrepo %s excluded") % s)
1112 if wctx.sub(s).dirty(True):
1112 if wctx.sub(s).dirty(True):
1113 if not self.ui.configbool('ui', 'commitsubrepos'):
1113 if not self.ui.configbool('ui', 'commitsubrepos'):
1114 raise util.Abort(
1114 raise util.Abort(
1115 _("uncommitted changes in subrepo %s") % s,
1115 _("uncommitted changes in subrepo %s") % s,
1116 hint=_("use --subrepos for recursive commit"))
1116 hint=_("use --subrepos for recursive commit"))
1117 subs.append(s)
1117 subs.append(s)
1118 commitsubs.add(s)
1118 commitsubs.add(s)
1119 else:
1119 else:
1120 bs = wctx.sub(s).basestate()
1120 bs = wctx.sub(s).basestate()
1121 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1121 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1122 if oldstate.get(s, (None, None, None))[1] != bs:
1122 if oldstate.get(s, (None, None, None))[1] != bs:
1123 subs.append(s)
1123 subs.append(s)
1124
1124
1125 # check for removed subrepos
1125 # check for removed subrepos
1126 for p in wctx.parents():
1126 for p in wctx.parents():
1127 r = [s for s in p.substate if s not in newstate]
1127 r = [s for s in p.substate if s not in newstate]
1128 subs += [s for s in r if match(s)]
1128 subs += [s for s in r if match(s)]
1129 if subs:
1129 if subs:
1130 if (not match('.hgsub') and
1130 if (not match('.hgsub') and
1131 '.hgsub' in (wctx.modified() + wctx.added())):
1131 '.hgsub' in (wctx.modified() + wctx.added())):
1132 raise util.Abort(
1132 raise util.Abort(
1133 _("can't commit subrepos without .hgsub"))
1133 _("can't commit subrepos without .hgsub"))
1134 changes[0].insert(0, '.hgsubstate')
1134 changes[0].insert(0, '.hgsubstate')
1135
1135
1136 elif '.hgsub' in changes[2]:
1136 elif '.hgsub' in changes[2]:
1137 # clean up .hgsubstate when .hgsub is removed
1137 # clean up .hgsubstate when .hgsub is removed
1138 if ('.hgsubstate' in wctx and
1138 if ('.hgsubstate' in wctx and
1139 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1139 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1140 changes[2].insert(0, '.hgsubstate')
1140 changes[2].insert(0, '.hgsubstate')
1141
1141
1142 # make sure all explicit patterns are matched
1142 # make sure all explicit patterns are matched
1143 if not force and match.files():
1143 if not force and match.files():
1144 matched = set(changes[0] + changes[1] + changes[2])
1144 matched = set(changes[0] + changes[1] + changes[2])
1145
1145
1146 for f in match.files():
1146 for f in match.files():
1147 if f == '.' or f in matched or f in wctx.substate:
1147 if f == '.' or f in matched or f in wctx.substate:
1148 continue
1148 continue
1149 if f in changes[3]: # missing
1149 if f in changes[3]: # missing
1150 fail(f, _('file not found!'))
1150 fail(f, _('file not found!'))
1151 if f in vdirs: # visited directory
1151 if f in vdirs: # visited directory
1152 d = f + '/'
1152 d = f + '/'
1153 for mf in matched:
1153 for mf in matched:
1154 if mf.startswith(d):
1154 if mf.startswith(d):
1155 break
1155 break
1156 else:
1156 else:
1157 fail(f, _("no match under directory!"))
1157 fail(f, _("no match under directory!"))
1158 elif f not in self.dirstate:
1158 elif f not in self.dirstate:
1159 fail(f, _("file not tracked!"))
1159 fail(f, _("file not tracked!"))
1160
1160
1161 if (not force and not extra.get("close") and not merge
1161 if (not force and not extra.get("close") and not merge
1162 and not (changes[0] or changes[1] or changes[2])
1162 and not (changes[0] or changes[1] or changes[2])
1163 and wctx.branch() == wctx.p1().branch()):
1163 and wctx.branch() == wctx.p1().branch()):
1164 return None
1164 return None
1165
1165
1166 if merge and changes[3]:
1166 if merge and changes[3]:
1167 raise util.Abort(_("cannot commit merge with missing files"))
1167 raise util.Abort(_("cannot commit merge with missing files"))
1168
1168
1169 ms = mergemod.mergestate(self)
1169 ms = mergemod.mergestate(self)
1170 for f in changes[0]:
1170 for f in changes[0]:
1171 if f in ms and ms[f] == 'u':
1171 if f in ms and ms[f] == 'u':
1172 raise util.Abort(_("unresolved merge conflicts "
1172 raise util.Abort(_("unresolved merge conflicts "
1173 "(see hg help resolve)"))
1173 "(see hg help resolve)"))
1174
1174
1175 cctx = context.workingctx(self, text, user, date, extra, changes)
1175 cctx = context.workingctx(self, text, user, date, extra, changes)
1176 if editor:
1176 if editor:
1177 cctx._text = editor(self, cctx, subs)
1177 cctx._text = editor(self, cctx, subs)
1178 edited = (text != cctx._text)
1178 edited = (text != cctx._text)
1179
1179
1180 # commit subs and write new state
1180 # commit subs and write new state
1181 if subs:
1181 if subs:
1182 for s in sorted(commitsubs):
1182 for s in sorted(commitsubs):
1183 sub = wctx.sub(s)
1183 sub = wctx.sub(s)
1184 self.ui.status(_('committing subrepository %s\n') %
1184 self.ui.status(_('committing subrepository %s\n') %
1185 subrepo.subrelpath(sub))
1185 subrepo.subrelpath(sub))
1186 sr = sub.commit(cctx._text, user, date)
1186 sr = sub.commit(cctx._text, user, date)
1187 newstate[s] = (newstate[s][0], sr)
1187 newstate[s] = (newstate[s][0], sr)
1188 subrepo.writestate(self, newstate)
1188 subrepo.writestate(self, newstate)
1189
1189
1190 # Save commit message in case this transaction gets rolled back
1190 # Save commit message in case this transaction gets rolled back
1191 # (e.g. by a pretxncommit hook). Leave the content alone on
1191 # (e.g. by a pretxncommit hook). Leave the content alone on
1192 # the assumption that the user will use the same editor again.
1192 # the assumption that the user will use the same editor again.
1193 msgfn = self.savecommitmessage(cctx._text)
1193 msgfn = self.savecommitmessage(cctx._text)
1194
1194
1195 p1, p2 = self.dirstate.parents()
1195 p1, p2 = self.dirstate.parents()
1196 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1196 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1197 try:
1197 try:
1198 self.hook("precommit", throw=True, parent1=hookp1,
1198 self.hook("precommit", throw=True, parent1=hookp1,
1199 parent2=hookp2)
1199 parent2=hookp2)
1200 ret = self.commitctx(cctx, True)
1200 ret = self.commitctx(cctx, True)
1201 except: # re-raises
1201 except: # re-raises
1202 if edited:
1202 if edited:
1203 self.ui.write(
1203 self.ui.write(
1204 _('note: commit message saved in %s\n') % msgfn)
1204 _('note: commit message saved in %s\n') % msgfn)
1205 raise
1205 raise
1206
1206
1207 # update bookmarks, dirstate and mergestate
1207 # update bookmarks, dirstate and mergestate
1208 bookmarks.update(self, [p1, p2], ret)
1208 bookmarks.update(self, [p1, p2], ret)
1209 for f in changes[0] + changes[1]:
1209 for f in changes[0] + changes[1]:
1210 self.dirstate.normal(f)
1210 self.dirstate.normal(f)
1211 for f in changes[2]:
1211 for f in changes[2]:
1212 self.dirstate.drop(f)
1212 self.dirstate.drop(f)
1213 self.dirstate.setparents(ret)
1213 self.dirstate.setparents(ret)
1214 ms.reset()
1214 ms.reset()
1215 finally:
1215 finally:
1216 wlock.release()
1216 wlock.release()
1217
1217
1218 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1218 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1219 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1219 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1220 self._afterlock(commithook)
1220 self._afterlock(commithook)
1221 return ret
1221 return ret
1222
1222
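    # Illustrative sketch for commit() above (hypothetical caller, not part
    # of the original source): commit() returns the new changeset node, or
    # None when there is nothing to commit:
    #
    #   node = repo.commit(text='fix bug', user='alice <alice@example.com>')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
    #   else:
    #       repo.ui.status('committed %s\n' % short(node))
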
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

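    # Illustrative sketch for status() above (not part of the original
    # source): the result is a 7-tuple of sorted file lists, so callers
    # usually unpack it positionally:
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, clean=True)
    #
    # The unknown, ignored and clean lists stay empty unless the matching
    # keyword arguments are True.
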
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

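    # Illustrative note for between() above (not part of the original
    # source): the walk follows first parents from top towards bottom and
    # samples nodes at exponentially growing distances (1, 2, 4, 8, ...),
    # so for a linear history 0..10 the pair (node(10), node(0)) yields:
    #
    #   [node(9), node(8), node(6), node(2)]
    #
    # i.e. O(log n) waypoints, which the old discovery protocol used to
    # bisect the span between known and unknown nodes.
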
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)
        finally:
            lock.release()

        return result

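    # Illustrative note for the phase exchange above (an assumption about
    # typical wire data, not part of the original source):
    # listkeys('phases') returns a dict of strings. A publishing server
    # advertises {'publishing': 'True'}; a non-publishing one instead lists
    # its draft roots, e.g. {'<hex of a draft root>': '1'}. An empty dict
    # means the server predates phases, so all common changesets are
    # treated as public.
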
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

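    # Illustrative sketch for checkpush() above (hypothetical extension
    # code, not part of the original source): an extension can veto pushes
    # by overriding this hook, e.g. via extensions.wrapfunction:
    #
    #   def checkpush(orig, repo, force, revs):
    #       if not force and repo.ui.configbool('myext', 'blockpush'):
    #           raise util.Abort('pushing is disabled by myext')
    #       return orig(repo, force, revs)
    #
    # 'myext' and 'blockpush' are made-up names for illustration.
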
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. synchronize on all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads) )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly.
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

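    # An illustrative sketch of how a caller can drain the chunkbuffer
    # returned by changegroupsubset(); the transport helper named below
    # is hypothetical, not part of this module:
    #
    #   cg = repo.changegroupsubset(bases, heads, 'serve')
    #   while True:
    #       chunk = cg.read(4096)
    #       if not chunk:
    #           break
    #       send_to_client(chunk)   # hypothetical transport function
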
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

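    # A hedged sketch of the push-side counterpart: discovery produces an
    # outgoing set, getlocalbundle() turns it into a changegroup, and the
    # peer consumes it. The discovery entry point named below is assumed
    # for illustration, not asserted:
    #
    #   out = discovery.findcommonoutgoing(self, remote)  # assumed helper
    #   cg = self.getlocalbundle('push', out)
    #   if cg is not None:
    #       remote.unbundle(cg, ['force'], self.url())
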
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

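    # Note on the lookup() callback above: changegroup.bundle10 invokes it
    # once per node it serializes, and its return value is recorded as the
    # node's linknode -- the changeset that introduced it. Changelog nodes
    # link to themselves; manifest and filelog nodes must be mapped back to
    # their owning changeset, which is why mfs and fnodes are filled in as
    # side effects of the earlier phases.
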
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret.
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

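    # Interpreting the return value documented above, e.g. on the pull
    # side (a sketch; the variable name is illustrative):
    #
    #   modheads = repo.addchangegroup(cg, 'pull', remote.url())
    #   if modheads == 0:
    #       pass  # nothing was added
    #   elif modheads == 1:
    #       pass  # changesets added, head count unchanged
    #   elif modheads > 1:
    #       pass  # modheads - 1 new heads appeared
    #   else:
    #       pass  # -modheads - 1 heads went away
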
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            #                    requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True